1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2009 Adrian Chadd 5 * Copyright (c) 2012 Spectra Logic Corporation 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 */ 30 31 /** 32 * \file dev/xen/timer/xen_timer.c 33 * \brief A timer driver for the Xen hypervisor's PV clock. 34 */ 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/bus.h> 39 #include <sys/kernel.h> 40 #include <sys/module.h> 41 #include <sys/time.h> 42 #include <sys/timetc.h> 43 #include <sys/timeet.h> 44 #include <sys/smp.h> 45 #include <sys/limits.h> 46 #include <sys/clock.h> 47 #include <sys/proc.h> 48 49 #include <xen/xen-os.h> 50 #include <xen/features.h> 51 #include <xen/xen_intr.h> 52 #include <xen/hypervisor.h> 53 #include <contrib/xen/io/xenbus.h> 54 #include <contrib/xen/vcpu.h> 55 #include <xen/error.h> 56 57 #include <machine/cpu.h> 58 #include <machine/cpufunc.h> 59 #include <machine/clock.h> 60 #include <machine/_inttypes.h> 61 #include <machine/smp.h> 62 #include <machine/pvclock.h> 63 64 #include <dev/xen/timer/timer.h> 65 66 #include "clock_if.h" 67 68 #define NSEC_IN_SEC 1000000000ULL 69 #define NSEC_IN_USEC 1000ULL 70 /* 18446744073 = int(2^64 / NSEC_IN_SC) = 1 ns in 64-bit fractions */ 71 #define FRAC_IN_NSEC 18446744073LL 72 73 /* Xen timers may fire up to 100us off */ 74 #define XENTIMER_MIN_PERIOD_IN_NSEC 100*NSEC_IN_USEC 75 76 /* 77 * The real resolution of the PV clock is 1ns, but the highest 78 * resolution that FreeBSD supports is 1us, so just use that. 79 */ 80 #define XENCLOCK_RESOLUTION 1 81 82 #define XENTIMER_QUALITY 950 83 84 struct xentimer_pcpu_data { 85 uint64_t timer; 86 uint64_t last_processed; 87 void *irq_handle; 88 }; 89 90 DPCPU_DEFINE(struct xentimer_pcpu_data, xentimer_pcpu); 91 92 DPCPU_DECLARE(struct vcpu_info *, vcpu_info); 93 94 struct xentimer_softc { 95 device_t dev; 96 struct timecounter tc; 97 struct eventtimer et; 98 }; 99 100 static void 101 xentimer_identify(driver_t *driver, device_t parent) 102 { 103 if (!xen_domain()) 104 return; 105 106 /* Handle all Xen PV timers in one device instance. */ 107 if (devclass_get_device(devclass_find(driver->name), 0)) 108 return; 109 110 BUS_ADD_CHILD(parent, 0, driver->name, 0); 111 } 112 113 static int 114 xentimer_probe(device_t dev) 115 { 116 KASSERT((xen_domain()), ("Trying to use Xen timer on bare metal")); 117 /* 118 * In order to attach, this driver requires the following: 119 * - Vector callback support by the hypervisor, in order to deliver 120 * timer interrupts to the correct CPU for CPUs other than 0. 121 * - Access to the hypervisor shared info page, in order to look up 122 * each VCPU's timer information and the Xen wallclock time. 123 * - The hypervisor must say its PV clock is "safe" to use. 124 * - The hypervisor must support VCPUOP hypercalls. 125 * - The maximum number of CPUs supported by FreeBSD must not exceed 126 * the number of VCPUs supported by the hypervisor. 127 */ 128 #define XTREQUIRES(condition, reason...) \ 129 if (!(condition)) { \ 130 device_printf(dev, ## reason); \ 131 device_detach(dev); \ 132 return (ENXIO); \ 133 } 134 135 if (xen_hvm_domain()) { 136 XTREQUIRES(xen_vector_callback_enabled, 137 "vector callbacks unavailable\n"); 138 XTREQUIRES(xen_feature(XENFEAT_hvm_safe_pvclock), 139 "HVM safe pvclock unavailable\n"); 140 } 141 XTREQUIRES(HYPERVISOR_shared_info != NULL, 142 "shared info page unavailable\n"); 143 XTREQUIRES(HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 0, NULL) == 0, 144 "VCPUOPs interface unavailable\n"); 145 #undef XTREQUIRES 146 device_set_desc(dev, "Xen PV Clock"); 147 return (BUS_PROBE_NOWILDCARD); 148 } 149 150 /** 151 * \brief Get the current time, in nanoseconds, since the hypervisor booted. 152 * 153 * \param vcpu vcpu_info structure to fetch the time from. 154 * 155 */ 156 static uint64_t 157 xen_fetch_vcpu_time(struct vcpu_info *vcpu) 158 { 159 struct pvclock_vcpu_time_info *time; 160 161 time = (struct pvclock_vcpu_time_info *) &vcpu->time; 162 163 return (pvclock_get_timecount(time)); 164 } 165 166 static uint32_t 167 xentimer_get_timecount(struct timecounter *tc) 168 { 169 uint64_t vcpu_time; 170 171 /* 172 * We don't disable preemption here because the worst that can 173 * happen is reading the vcpu_info area of a different CPU than 174 * the one we are currently running on, but that would also 175 * return a valid tc (and we avoid the overhead of 176 * critical_{enter/exit} calls). 177 */ 178 vcpu_time = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info)); 179 180 return (vcpu_time & UINT32_MAX); 181 } 182 183 /** 184 * \brief Fetch the hypervisor boot time, known as the "Xen wallclock". 185 * 186 * \param ts Timespec to store the current stable value. 187 * \param version Pointer to store the corresponding wallclock version. 188 * 189 * \note This value is updated when Domain-0 shifts its clock to follow 190 * clock drift, e.g. as detected by NTP. 191 */ 192 static void 193 xen_fetch_wallclock(struct timespec *ts) 194 { 195 shared_info_t *src = HYPERVISOR_shared_info; 196 struct pvclock_wall_clock *wc; 197 198 wc = (struct pvclock_wall_clock *) &src->wc_version; 199 200 pvclock_get_wallclock(wc, ts); 201 } 202 203 static void 204 xen_fetch_uptime(struct timespec *ts) 205 { 206 uint64_t uptime; 207 208 uptime = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info)); 209 210 ts->tv_sec = uptime / NSEC_IN_SEC; 211 ts->tv_nsec = uptime % NSEC_IN_SEC; 212 } 213 214 static int 215 xentimer_settime(device_t dev __unused, struct timespec *ts) 216 { 217 struct xen_platform_op settime; 218 int ret; 219 220 /* 221 * Don't return EINVAL here; just silently fail if the domain isn't 222 * privileged enough to set the TOD. 223 */ 224 if (!xen_initial_domain()) 225 return (0); 226 227 settime.cmd = XENPF_settime64; 228 settime.u.settime64.mbz = 0; 229 settime.u.settime64.secs = ts->tv_sec; 230 settime.u.settime64.nsecs = ts->tv_nsec; 231 settime.u.settime64.system_time = 232 xen_fetch_vcpu_time(DPCPU_GET(vcpu_info)); 233 234 ret = HYPERVISOR_platform_op(&settime); 235 ret = ret != 0 ? xen_translate_error(ret) : 0; 236 if (ret != 0 && bootverbose) 237 device_printf(dev, "failed to set Xen PV clock: %d\n", ret); 238 239 return (ret); 240 } 241 242 /** 243 * \brief Return current time according to the Xen Hypervisor wallclock. 244 * 245 * \param dev Xentimer device. 246 * \param ts Pointer to store the wallclock time. 247 * 248 * \note The Xen time structures document the hypervisor start time and the 249 * uptime-since-hypervisor-start (in nsec.) They need to be combined 250 * in order to calculate a TOD clock. 251 */ 252 static int 253 xentimer_gettime(device_t dev, struct timespec *ts) 254 { 255 struct timespec u_ts; 256 257 timespecclear(ts); 258 xen_fetch_wallclock(ts); 259 xen_fetch_uptime(&u_ts); 260 timespecadd(ts, &u_ts, ts); 261 262 return (0); 263 } 264 265 /** 266 * \brief Handle a timer interrupt for the Xen PV timer driver. 267 * 268 * \param arg Xen timer driver softc that is expecting the interrupt. 269 */ 270 static int 271 xentimer_intr(void *arg) 272 { 273 struct xentimer_softc *sc = (struct xentimer_softc *)arg; 274 struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu); 275 276 pcpu->last_processed = xen_fetch_vcpu_time(DPCPU_GET(vcpu_info)); 277 if (pcpu->timer != 0 && sc->et.et_active) 278 sc->et.et_event_cb(&sc->et, sc->et.et_arg); 279 280 return (FILTER_HANDLED); 281 } 282 283 static int 284 xentimer_vcpu_start_timer(int vcpu, uint64_t next_time) 285 { 286 struct vcpu_set_singleshot_timer single; 287 288 single.timeout_abs_ns = next_time; 289 /* Get an event anyway, even if the timeout is already expired */ 290 single.flags = 0; 291 return (HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpu, &single)); 292 } 293 294 static int 295 xentimer_vcpu_stop_timer(int vcpu) 296 { 297 298 return (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, vcpu, NULL)); 299 } 300 301 /** 302 * \brief Set the next oneshot time for the current CPU. 303 * 304 * \param et Xen timer driver event timer to schedule on. 305 * \param first Delta to the next time to schedule the interrupt for. 306 * \param period Not used. 307 * 308 * \note See eventtimers(9) for more information. 309 * \note 310 * 311 * \returns 0 312 */ 313 static int 314 xentimer_et_start(struct eventtimer *et, 315 sbintime_t first, sbintime_t period) 316 { 317 int error; 318 struct xentimer_softc *sc = et->et_priv; 319 int cpu = PCPU_GET(vcpu_id); 320 struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu); 321 struct vcpu_info *vcpu = DPCPU_GET(vcpu_info); 322 uint64_t first_in_ns, next_time; 323 #ifdef INVARIANTS 324 struct thread *td = curthread; 325 #endif 326 327 KASSERT(td->td_critnest != 0, 328 ("xentimer_et_start called without preemption disabled")); 329 330 /* See sbttots() for this formula. */ 331 first_in_ns = (((first >> 32) * NSEC_IN_SEC) + 332 (((uint64_t)NSEC_IN_SEC * (uint32_t)first) >> 32)); 333 334 next_time = xen_fetch_vcpu_time(vcpu) + first_in_ns; 335 error = xentimer_vcpu_start_timer(cpu, next_time); 336 if (error) 337 panic("%s: Error %d setting singleshot timer to %"PRIu64"\n", 338 device_get_nameunit(sc->dev), error, next_time); 339 340 pcpu->timer = next_time; 341 return (error); 342 } 343 344 /** 345 * \brief Cancel the event timer's currently running timer, if any. 346 */ 347 static int 348 xentimer_et_stop(struct eventtimer *et) 349 { 350 int cpu = PCPU_GET(vcpu_id); 351 struct xentimer_pcpu_data *pcpu = DPCPU_PTR(xentimer_pcpu); 352 353 pcpu->timer = 0; 354 return (xentimer_vcpu_stop_timer(cpu)); 355 } 356 357 /** 358 * \brief Attach a Xen PV timer driver instance. 359 * 360 * \param dev Bus device object to attach. 361 * 362 * \note 363 * \returns EINVAL 364 */ 365 static int 366 xentimer_attach(device_t dev) 367 { 368 struct xentimer_softc *sc = device_get_softc(dev); 369 int error, i; 370 371 sc->dev = dev; 372 373 /* Bind an event channel to a VIRQ on each VCPU. */ 374 CPU_FOREACH(i) { 375 struct xentimer_pcpu_data *pcpu; 376 377 pcpu = DPCPU_ID_PTR(i, xentimer_pcpu); 378 error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL); 379 if (error) { 380 device_printf(dev, "Error disabling Xen periodic timer " 381 "on CPU %d\n", i); 382 return (error); 383 } 384 385 error = xen_intr_bind_virq(dev, VIRQ_TIMER, i, xentimer_intr, 386 NULL, sc, INTR_TYPE_CLK, &pcpu->irq_handle); 387 if (error) { 388 device_printf(dev, "Error %d binding VIRQ_TIMER " 389 "to VCPU %d\n", error, i); 390 return (error); 391 } 392 xen_intr_describe(pcpu->irq_handle, "c%d", i); 393 } 394 395 /* Register the event timer. */ 396 sc->et.et_name = "XENTIMER"; 397 sc->et.et_quality = XENTIMER_QUALITY; 398 sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; 399 sc->et.et_frequency = NSEC_IN_SEC; 400 /* See tstosbt() for this formula */ 401 sc->et.et_min_period = (XENTIMER_MIN_PERIOD_IN_NSEC * 402 (((uint64_t)1 << 63) / 500000000) >> 32); 403 sc->et.et_max_period = ((sbintime_t)4 << 32); 404 sc->et.et_start = xentimer_et_start; 405 sc->et.et_stop = xentimer_et_stop; 406 sc->et.et_priv = sc; 407 et_register(&sc->et); 408 409 /* Register the timecounter. */ 410 sc->tc.tc_name = "XENTIMER"; 411 sc->tc.tc_quality = XENTIMER_QUALITY; 412 /* 413 * FIXME: due to the lack of ordering during resume, FreeBSD cannot 414 * guarantee that the Xen PV timer is resumed before any other device 415 * attempts to make use of it, so mark it as not safe for suspension 416 * (ie: remove the TC_FLAGS_SUSPEND_SAFE flag). 417 * 418 * NB: This was not a problem in previous FreeBSD versions because the 419 * timer was directly attached to the nexus, but it is an issue now 420 * that the timer is attached to the xenpv bus, and thus resumed 421 * later. 422 * 423 * sc->tc.tc_flags = TC_FLAGS_SUSPEND_SAFE; 424 */ 425 /* 426 * The underlying resolution is in nanoseconds, since the timer info 427 * scales TSC frequencies using a fraction that represents time in 428 * terms of nanoseconds. 429 */ 430 sc->tc.tc_frequency = NSEC_IN_SEC; 431 sc->tc.tc_counter_mask = ~0u; 432 sc->tc.tc_get_timecount = xentimer_get_timecount; 433 sc->tc.tc_priv = sc; 434 tc_init(&sc->tc); 435 436 /* Register the Hypervisor wall clock */ 437 clock_register(dev, XENCLOCK_RESOLUTION); 438 439 return (0); 440 } 441 442 static int 443 xentimer_detach(device_t dev) 444 { 445 446 /* Implement Xen PV clock teardown - XXX see hpet_detach ? */ 447 /* If possible: 448 * 1. need to deregister timecounter 449 * 2. need to deregister event timer 450 * 3. need to deregister virtual IRQ event channels 451 */ 452 return (EBUSY); 453 } 454 455 static void 456 xentimer_percpu_resume(void *arg) 457 { 458 device_t dev = (device_t) arg; 459 struct xentimer_softc *sc = device_get_softc(dev); 460 461 xentimer_et_start(&sc->et, sc->et.et_min_period, 0); 462 } 463 464 static int 465 xentimer_resume(device_t dev) 466 { 467 int error; 468 int i; 469 470 /* Disable the periodic timer */ 471 CPU_FOREACH(i) { 472 error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, i, NULL); 473 if (error != 0) { 474 device_printf(dev, 475 "Error disabling Xen periodic timer on CPU %d\n", 476 i); 477 return (error); 478 } 479 } 480 481 /* Reset the last uptime value */ 482 pvclock_resume(); 483 484 /* Reset the RTC clock */ 485 inittodr(time_second); 486 487 /* Kick the timers on all CPUs */ 488 smp_rendezvous(NULL, xentimer_percpu_resume, NULL, dev); 489 490 if (bootverbose) 491 device_printf(dev, "resumed operation after suspension\n"); 492 493 return (0); 494 } 495 496 static int 497 xentimer_suspend(device_t dev) 498 { 499 return (0); 500 } 501 502 /* 503 * Xen early clock init 504 */ 505 void 506 xen_clock_init(void) 507 { 508 } 509 510 /* 511 * Xen PV DELAY function 512 * 513 * When running on PVH mode we don't have an emulated i8524, so 514 * make use of the Xen time info in order to code a simple DELAY 515 * function that can be used during early boot. 516 */ 517 void 518 xen_delay(int n) 519 { 520 struct vcpu_info *vcpu = &HYPERVISOR_shared_info->vcpu_info[0]; 521 uint64_t end_ns; 522 uint64_t current; 523 524 end_ns = xen_fetch_vcpu_time(vcpu); 525 end_ns += n * NSEC_IN_USEC; 526 527 for (;;) { 528 current = xen_fetch_vcpu_time(vcpu); 529 if (current >= end_ns) 530 break; 531 } 532 } 533 534 static device_method_t xentimer_methods[] = { 535 DEVMETHOD(device_identify, xentimer_identify), 536 DEVMETHOD(device_probe, xentimer_probe), 537 DEVMETHOD(device_attach, xentimer_attach), 538 DEVMETHOD(device_detach, xentimer_detach), 539 DEVMETHOD(device_suspend, xentimer_suspend), 540 DEVMETHOD(device_resume, xentimer_resume), 541 /* clock interface */ 542 DEVMETHOD(clock_gettime, xentimer_gettime), 543 DEVMETHOD(clock_settime, xentimer_settime), 544 DEVMETHOD_END 545 }; 546 547 static driver_t xentimer_driver = { 548 "xen_et", 549 xentimer_methods, 550 sizeof(struct xentimer_softc), 551 }; 552 553 DRIVER_MODULE(xentimer, xenpv, xentimer_driver, 0, 0); 554 MODULE_DEPEND(xentimer, xenpv, 1, 1, 1); 555