/*-
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.85 1998/11/23 09:58:53 phk Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/limits.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#if defined(SMP) && defined(BETTER_CLOCK)
#include <machine/smp.h>
#endif

/* This is where the NTIMECOUNTER option hangs out */
#include "opt_ntp.h"

/*
 * Number of timecounters used to implement stable storage
 */
#ifndef NTIMECOUNTER
#define NTIMECOUNTER 5
#endif

static MALLOC_DEFINE(M_TIMECOUNTER, "timecounter",
	"Timecounter stable storage");

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

static void tco_forward __P((int force));
static void tco_setscales __P((struct timecounter *tc));
static __inline unsigned tco_delta __P((struct timecounter *tc));

/* Some of these don't belong here, but it's easiest to concentrate them. */
#if defined(SMP) && defined(BETTER_CLOCK)
long cp_time[CPUSTATES];
#else
static long cp_time[CPUSTATES];
#endif

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

time_t time_second;

/*
 * Which update policy to use.
 *    0 - every tick, bad hardware may fail with "calcru negative..."
 *    1 - more resistant to the above hardware, but less efficient.
 */
static int tco_method;

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static unsigned
dummy_get_timecount(struct timecounter *tc)
{
	static unsigned now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount,
	0,
	~0u,
	1000000,
	"dummy"
};

struct timecounter *timecounter = &dummy_timecounter;
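/*
 * For reference, the positional initializer above maps onto the struct
 * timecounter fields roughly as follows (illustrative sketch; the struct
 * is declared in <sys/time.h>, and the field names are those used
 * elsewhere in this file):
 *
 *	tc_get_timecount = dummy_get_timecount	(read the "hardware")
 *	tc_poll_pps      = 0			(no PPS polling hook)
 *	tc_counter_mask  = ~0u			(full 32-bit counter)
 *	tc_frequency     = 1000000		(nominal 1 MHz)
 *	tc_name          = "dummy"
 */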
/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct proc *p;

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

#if defined(SMP) && defined(BETTER_CLOCK)
	forward_hardclock(pscnt);
#endif

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	tco_forward(0);
	ticks++;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	} else if (softticks + 1 == ticks)
		++softticks;
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
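/*
 * Worked example for tvtohz() (illustrative, assuming hz = 100 and
 * hence tick = 10000 usec): {1 sec, 500000 usec} takes the first
 * branch, since 1 <= LONG_MAX / 1000000:
 *
 *	(1 * 1000000 + 500000 + 9999) / 10000 + 1
 *	    = 1509999 / 10000 + 1 = 150 + 1 = 151 ticks
 *
 * The "+ (tick - 1)" rounds any remainder up, and the final "+ 1"
 * covers the partial tick currently in progress, so a caller sleeping
 * for the result is guaranteed at least the requested interval.
 */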
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif
	register struct proc *p;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	if (curproc != NULL && CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state.
	 */

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			rss = vm->vm_pmap.pm_stats.resident_count *
			      PAGE_SIZE / 1024;
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}
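/*
 * Numerical sketch of the decay claim above (illustrative only; the
 * actual filter lives in kern_synch.c):  roughly once per second
 * p_estcpu is scaled by (2 * loadav) / (2 * loadav + 1).  After
 * 5 * loadav seconds the surviving fraction is
 *
 *	((2 * loadav) / (2 * loadav + 1)) ^ (5 * loadav)
 *	    ~= exp(-5 * loadav / (2 * loadav + 1)) ~= exp(-2.5) ~= 0.08
 *
 * for loadav reasonably large, i.e. about 90% of the accumulated CPU
 * usage has been forgotten, matching the comment.
 */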
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

static __inline unsigned
tco_delta(struct timecounter *tc)
{

	return ((tc->tc_get_timecount(tc) - tc->tc_offset_count) &
	    tc->tc_counter_mask);
}
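/*
 * Example of why the mask makes counter wraparound harmless
 * (illustrative, assuming a 16-bit counter, i.e. tc_counter_mask =
 * 0xffff):  with tc_offset_count = 0xfff0 and a current reading of
 * 0x0010, the unsigned subtraction wraps to 0xffff0020, and masking
 * yields 0x0020 -- the correct delta of 0x20 counts -- provided the
 * counter has wrapped at most once since tc_offset_count was captured.
 */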
/*
 * We have four functions for looking at the clock, two for microseconds
 * and two for nanoseconds.  For each there is a fast but less precise
 * version "get{nano|micro}time" which will return a time up to 1/HZ
 * earlier than the call, whereas the raw version "{nano|micro}time"
 * will return a timestamp which is as precise as possible.
 */

void
getmicrotime(struct timeval *tvp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		*tvp = tc->tc_microtime;
	} else {
		microtime(tvp);
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		*tsp = tc->tc_nanotime;
	} else {
		nanotime(tsp);
	}
}

void
microtime(struct timeval *tv)
{
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	tv->tv_sec = tc->tc_offset_sec;
	tv->tv_usec = tc->tc_offset_micro;
	tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
	tv->tv_usec += boottime.tv_usec;
	tv->tv_sec += boottime.tv_sec;
	while (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void
nanotime(struct timespec *ts)
{
	unsigned count;
	u_int64_t delta;
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	ts->tv_sec = tc->tc_offset_sec;
	count = tco_delta(tc);
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)count * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)count * tc->tc_scale_nano_i);
	delta += boottime.tv_usec * 1000;
	ts->tv_sec += boottime.tv_sec;
	while (delta >= 1000000000) {
		delta -= 1000000000;
		ts->tv_sec++;
	}
	ts->tv_nsec = delta;
}

void
timecounter_timespec(unsigned count, struct timespec *ts)
{
	u_int64_t delta;
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	ts->tv_sec = tc->tc_offset_sec;
	count -= tc->tc_offset_count;
	count &= tc->tc_counter_mask;
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)count * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)count * tc->tc_scale_nano_i);
	delta += boottime.tv_usec * 1000;
	ts->tv_sec += boottime.tv_sec;
	while (delta >= 1000000000) {
		delta -= 1000000000;
		ts->tv_sec++;
	}
	ts->tv_nsec = delta;
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		tvp->tv_sec = tc->tc_offset_sec;
		tvp->tv_usec = tc->tc_offset_micro;
	} else {
		microuptime(tvp);
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		tsp->tv_sec = tc->tc_offset_sec;
		tsp->tv_nsec = tc->tc_offset_nano >> 32;
	} else {
		nanouptime(tsp);
	}
}

void
microuptime(struct timeval *tv)
{
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	tv->tv_sec = tc->tc_offset_sec;
	tv->tv_usec = tc->tc_offset_micro;
	tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void
nanouptime(struct timespec *ts)
{
	unsigned count;
	u_int64_t delta;
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	ts->tv_sec = tc->tc_offset_sec;
	count = tco_delta(tc);
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)count * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)count * tc->tc_scale_nano_i);
	if (delta >= 1000000000) {
		delta -= 1000000000;
		ts->tv_sec++;
	}
	ts->tv_nsec = delta;
}

static void
tco_setscales(struct timecounter *tc)
{
	u_int64_t scale;

	scale = 1000000000LL << 32;
	if (tc->tc_adjustment > 0)
		scale += (tc->tc_adjustment * 1000LL) << 10;
	else
		scale -= (-tc->tc_adjustment * 1000LL) << 10;
	scale /= tc->tc_frequency;
	tc->tc_scale_micro = scale / 1000;
	tc->tc_scale_nano_f = scale & 0xffffffff;
	tc->tc_scale_nano_i = scale >> 32;
}
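/*
 * Worked example for the scale factors (illustrative, assuming the
 * i8254 frequency of 1193182 Hz and no NTP adjustment):
 *
 *	scale           = (1000000000 << 32) / 1193182
 *	                ~= 3599591090000, i.e. the tick period of
 *	                   ~838.095 ns as a 32.32 fixed-point number
 *	tc_scale_nano_i = scale >> 32 = 838	(integer ns per count)
 *	tc_scale_nano_f = low 32 bits		(fractional ns per count)
 *	tc_scale_micro  = scale / 1000		(32.32 usec per count)
 *
 * nanotime() and friends then multiply a counter delta by these
 * constants instead of dividing by the frequency on every clock read.
 */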
void
init_timecounter(struct timecounter *tc)
{
	struct timespec ts1;
	struct timecounter *t1, *t2, *t3;
	int i;

	tc->tc_adjustment = 0;
	tco_setscales(tc);
	tc->tc_offset_count = tc->tc_get_timecount(tc);
	tc->tc_tweak = tc;
	MALLOC(t1, struct timecounter *, sizeof *t1, M_TIMECOUNTER, M_WAITOK);
	*t1 = *tc;
	t2 = t1;
	for (i = 1; i < NTIMECOUNTER; i++) {
		MALLOC(t3, struct timecounter *, sizeof *t3,
		    M_TIMECOUNTER, M_WAITOK);
		*t3 = *tc;
		t3->tc_other = t2;
		t2 = t3;
	}
	t1->tc_other = t3;
	tc = t1;

	printf("Timecounter \"%s\"  frequency %lu Hz\n",
	    tc->tc_name, (u_long)tc->tc_frequency);

	/* XXX: For now always start using the counter. */
	tc->tc_offset_count = tc->tc_get_timecount(tc);
	nanouptime(&ts1);
	tc->tc_offset_nano = (u_int64_t)ts1.tv_nsec << 32;
	tc->tc_offset_micro = ts1.tv_nsec / 1000;
	tc->tc_offset_sec = ts1.tv_sec;
	timecounter = tc;
}

void
set_timecounter(struct timespec *ts)
{
	struct timespec ts2;

	nanouptime(&ts2);
	boottime.tv_sec = ts->tv_sec - ts2.tv_sec;
	boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000;
	if (boottime.tv_usec < 0) {
		boottime.tv_usec += 1000000;
		boottime.tv_sec--;
	}
	/* fiddle all the little crinkly bits around the fiords... */
	tco_forward(1);
}

#if 0 /* Currently unused */
void
switch_timecounter(struct timecounter *newtc)
{
	int s;
	struct timecounter *tc;
	struct timespec ts;

	s = splclock();
	tc = timecounter;
	if (newtc == tc || newtc == tc->tc_other) {
		splx(s);
		return;
	}
	nanouptime(&ts);
	newtc->tc_offset_sec = ts.tv_sec;
	newtc->tc_offset_nano = (u_int64_t)ts.tv_nsec << 32;
	newtc->tc_offset_micro = ts.tv_nsec / 1000;
	newtc->tc_offset_count = newtc->tc_get_timecount(newtc);
	timecounter = newtc;
	splx(s);
}
#endif

static struct timecounter *
sync_other_counter(void)
{
	struct timecounter *tc, *tcn, *tco;
	unsigned delta;

	tco = timecounter;
	tc = tco->tc_other;
	tcn = tc->tc_other;
	*tc = *tco;
	tc->tc_other = tcn;
	delta = tco_delta(tc);
	tc->tc_offset_count += delta;
	tc->tc_offset_count &= tc->tc_counter_mask;
	tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_f;
	tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_i << 32;
	return (tc);
}
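/*
 * Note on the stable-storage ring built in init_timecounter():
 * sync_other_counter() never modifies the generation readers currently
 * see; it copies the live state into the next of the NTIMECOUNTER
 * generations and advances into that copy.  A reader which loaded the
 * old timecounter pointer can therefore keep using a frozen, consistent
 * snapshot, and that slot will not be reused until the ring has been
 * cycled through NTIMECOUNTER - 1 more updates.
 */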
static void
tco_forward(int force)
{
	struct timecounter *tc, *tco;

	tco = timecounter;
	tc = sync_other_counter();
	/*
	 * We may be inducing a tiny error here, the tc_poll_pps() may
	 * process a latched count which happens after the tco_delta()
	 * in sync_other_counter(), which would extend the previous
	 * counter's parameters into the domain of this new one.
	 * Since the time window is very small for this, the error is
	 * going to be only a few weenieseconds (as Dave Mills would
	 * say), so let's just not talk more about it, OK?
	 */
	if (tco->tc_poll_pps)
		tco->tc_poll_pps(tco);
	if (timedelta != 0) {
		tc->tc_offset_nano += (u_int64_t)(tickdelta * 1000) << 32;
		timedelta -= tickdelta;
		force++;
	}

	while (tc->tc_offset_nano >= 1000000000ULL << 32) {
		tc->tc_offset_nano -= 1000000000ULL << 32;
		tc->tc_offset_sec++;
		tc->tc_frequency = tc->tc_tweak->tc_frequency;
		tc->tc_adjustment = tc->tc_tweak->tc_adjustment;
		ntp_update_second(tc);	/* XXX only needed if xntpd runs */
		tco_setscales(tc);
		force++;
	}

	if (tco_method && !force)
		return;

	tc->tc_offset_micro = (tc->tc_offset_nano / 1000) >> 32;

	/* Figure out the wall-clock time */
	tc->tc_nanotime.tv_sec = tc->tc_offset_sec + boottime.tv_sec;
	tc->tc_nanotime.tv_nsec =
	    (tc->tc_offset_nano >> 32) + boottime.tv_usec * 1000;
	tc->tc_microtime.tv_usec = tc->tc_offset_micro + boottime.tv_usec;
	if (tc->tc_nanotime.tv_nsec >= 1000000000) {
		tc->tc_nanotime.tv_nsec -= 1000000000;
		tc->tc_microtime.tv_usec -= 1000000;
		tc->tc_nanotime.tv_sec++;
	}
	time_second = tc->tc_microtime.tv_sec = tc->tc_nanotime.tv_sec;

	timecounter = tc;
}

static int
sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS
{

	return (sysctl_handle_opaque(oidp,
	    &timecounter->tc_tweak->tc_frequency,
	    sizeof(timecounter->tc_tweak->tc_frequency), req));
}

static int
sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS
{

	return (sysctl_handle_opaque(oidp,
	    &timecounter->tc_tweak->tc_adjustment,
	    sizeof(timecounter->tc_tweak->tc_adjustment), req));
}

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

SYSCTL_INT(_kern_timecounter, KERN_ARGMAX, method, CTLFLAG_RW, &tco_method, 0,
    "This variable determines the method used for updating timecounters.  "
    "If the default algorithm (0) fails with \"calcru negative...\" messages "
    "try the alternate algorithm (1) which handles bad hardware better.");

SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", "");

SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", "");
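/*
 * Userland view of the knobs exported above (illustrative sketch; not
 * part of this file's build).  A small program can read kern.clockrate
 * through sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct clockinfo ci;
 *		size_t len = sizeof(ci);
 *
 *		if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("hz %d tick %d profhz %d stathz %d\n",
 *		    ci.hz, ci.tick, ci.profhz, ci.stathz);
 *		return (0);
 *	}
 *
 * The tco_method knob appears as kern.timecounter.method and can be
 * toggled with sysctl(8), e.g. "sysctl -w kern.timecounter.method=1".
 */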