/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

int			cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int			cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

struct pcpu_state;
static sbintime_t	getnextcpuevent(struct pcpu_state *state, int idle);
static sbintime_t	getnextevent(struct pcpu_state *state);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}

static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling;	/* Profiling events enabled. */
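/*
 * Note: all of the "now"/"next*" times in this file are sbintime_t values,
 * i.e. signed 64-bit, 32.32 fixed-point seconds, which the KTR traces below
 * print as "seconds.fraction".  Also note that 'busy' starts at 1, so all
 * event handlers stay quiescent until cpu_initclocks_bsp() performs the
 * initial configtimer(1).
 */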
static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul;	/* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick;	/* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int		periodic;	/* Periodic or one-shot mode. */
static int		want_periodic;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

DPCPU_DEFINE_STATIC(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR2(KTR_SPARE2, "ipi: now %d.%08x",
	    (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}
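/*
 * The 'fake' argument of handleevents() below encodes the calling context:
 * 0 - a real timer interrupt (a trapframe is available); 1 - catching up
 * after idle, from cpu_activeclock() (no trapframe, so profclock() is
 * skipped); 2 - priming an AP, from cpu_initclocks_ap() (only advance the
 * per-CPU counters; do not call hardclock()/statclock()/profclock()).
 */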
/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR2(KTR_SPARE2, "handle: now %d.%08x",
	    (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt || now >= state->nextcall) {
		state->nextcall = state->nextcallopt = SBT_MAX;
		callout_process(now);
	}

	ET_HW_LOCK(state);
	t = getnextcpuevent(state, 0);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, (fake == 2) &&
		    (timer->et_flags & ET_FLAGS_PERCPU));
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Return the time of the next event scheduled on the current CPU.
 */
static sbintime_t
getnextcpuevent(struct pcpu_state *state, int idle)
{
	sbintime_t event;
	u_int hardfreq;

	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		if (tc_min_ticktock_freq > 1
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		else
			hardfreq = hz;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) {	/* If CPU is active, handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
	return (event);
}

/*
 * Return the time of the earliest event scheduled on any CPU.
 */
static sbintime_t
getnextevent(struct pcpu_state *state)
{
	sbintime_t event;
#ifdef SMP
	int cpu;
#endif
#ifdef KTR
	int c;

	c = -1;
#endif
	event = state->nextevent;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
#ifdef KTR
				c = cpu;
#endif
			}
		}
	}
#endif
	CTR3(KTR_SPARE2, "next: next %d.%08x by %d",
	    (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}
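/*
 * When the active timer lacks ET_FLAGS_PERCPU, the callback below runs on
 * a single CPU and forwards due events to the other CPUs by broadcasting
 * IPI_HARDCLOCK, which is handled by hardclockintr() above.
 */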
/*
 * Hardware timer callback function.
 */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR2(KTR_SPARE2, "intr: now %d.%08x",
	    (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#endif
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
#ifdef EARLY_AP_STARTUP
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load a new value into the hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	state = DPCPU_PTR(timerstate);
	if (timer->et_flags & ET_FLAGS_PERCPU)
		next = &state->nexttick;
	else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)		/* Left less than passed. */
				new += timerperiod;
			CTR4(KTR_SPARE2, "load p: now %d.%08x first in %d.%08x",
			    (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent(state);
		eq = (new == *next);
		CTR3(KTR_SPARE2, "load: next %d.%08x eq %d",
		    (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}
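/*
 * Values of pcpu_state.action, set by configtimer() and consumed below:
 * 1 - load/start this CPU's timer, 2 - stop it.
 */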
/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from the
 * IPI handler.
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure theirs.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
#ifdef EARLY_AP_STARTUP
		MPASS(mp_ncpus == 1 || smp_started);
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
#ifndef EARLY_AP_STARTUP
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = SBT_MAX;
			else
#endif
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
#ifdef EARLY_AP_STARTUP
	/* If timer is global we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	/* If timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
#endif
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}
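/*
 * Illustrative example for round_freq() below, assuming a divisor-based
 * timer with et_frequency = 14318182 Hz: a 1000 Hz request yields
 * div = 14318 and freq = 1000.  With ET_FLAGS_POW2DIV the divisor is
 * instead rounded to the nearest power of two by flsl(div + div / 2) - 1,
 * giving div = 16384 and freq = 874.
 */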
/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
		state->nextcall = SBT_MAX;
		state->nextcallopt = SBT_MAX;
	}
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}
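/*
 * Worked example of the periodic-mode rate derivation above, assuming
 * hz = 1000, the singlemul tunable left unset, and round_freq() returning
 * its argument unchanged: singlemul defaults to 2, so base = 2000;
 * div = 2000 / 128 = 15 (not bumped, since 15 % 2 != 0), giving
 * stathz = 2000 / 15 = 133 and profhz = 133 * 61 = 8113, the largest
 * multiple of stathz not above 128 * 64 = 8192.
 */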
/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	ET_HW_LOCK(state);
	state->now = sbinuptime();
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

void
suspendclock(void)
{
	ET_LOCK();
	configtimer(0);
	ET_UNLOCK();
}

void
resumeclock(void)
{
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	ET_HW_LOCK(state);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR2(KTR_SPARE2, "idle: now %d.%08x",
	    (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(state, 1);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (atomic_load_int(&state->idle) == 0 || busy)
		return;
	spinlock_enter();
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR2(KTR_SPARE2, "active: now %d.%08x",
	    (int)(now >> 32), (u_int)(now & 0xffffffff));
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency
 * and, if et is the active timer, reconfigures the timer on all CPUs.  This
 * is intended to be a private interface for the use of et_change_frequency()
 * only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

	ET_LOCK();
	if (et == timer) {
		configtimer(0);
		et->et_frequency = newfreq;
		configtimer(1);
	} else
		et->et_frequency = newfreq;
	ET_UNLOCK();
}
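/*
 * Schedule a callout-driven event on the given CPU.  'bt' is the hard
 * deadline used to program the timer; 'bt_opt' is an earlier, optional
 * time at which it is already acceptable to process callouts (compare
 * the nextcallopt check in handleevents()).
 */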
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;

	CTR5(KTR_SPARE2, "new co: on %d at %d.%08x - %d.%08x",
	    cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));

	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If a callout time is already set earlier, do nothing.  This check
	 * may appear redundant because we already check in callout_process(),
	 * but this double check guarantees we're safe with respect to race
	 * conditions between interrupt execution and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier, do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If the timer is periodic, there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If the timer is global or owned by the current CPU, reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
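/*
 * Example usage of the tunables/sysctls defined here and below, e.g. from
 * loader.conf or sysctl(8) (timer names are platform-dependent; "HPET" is
 * an x86 example):
 *	kern.eventtimer.timer=HPET	# select the active event timer
 *	kern.eventtimer.periodic=1	# prefer periodic mode
 */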
/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I",
    "Enable event timer periodic mode");

#include "opt_ddb.h"

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(clocksource, db_show_clocksource)
{
	struct pcpu_state *st;
	int c;

	CPU_FOREACH(c) {
		st = DPCPU_ID_PTR(c, timerstate);
		db_printf(
		    "CPU %2d: action %d handle %d ipi %d idle %d\n"
		    "        now %#jx nevent %#jx (%jd)\n"
		    "        ntick %#jx (%jd) nhard %#jx (%jd)\n"
		    "        nstat %#jx (%jd) nprof %#jx (%jd)\n"
		    "        ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
		    c, st->action, st->handle, st->ipi, st->idle,
		    (uintmax_t)st->now,
		    (uintmax_t)st->nextevent,
		    (uintmax_t)(st->nextevent - st->now) / tick_sbt,
		    (uintmax_t)st->nexttick,
		    (uintmax_t)(st->nexttick - st->now) / tick_sbt,
		    (uintmax_t)st->nexthard,
		    (uintmax_t)(st->nexthard - st->now) / tick_sbt,
		    (uintmax_t)st->nextstat,
		    (uintmax_t)(st->nextstat - st->now) / tick_sbt,
		    (uintmax_t)st->nextprof,
		    (uintmax_t)(st->nextprof - st->now) / tick_sbt,
		    (uintmax_t)st->nextcall,
		    (uintmax_t)(st->nextcall - st->now) / tick_sbt,
		    (uintmax_t)st->nextcallopt,
		    (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
	}
}

#endif