/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"

#if defined(__amd64__)
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/vdso.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_hpet.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#endif

#define	HPET_VENDID_AMD		0x4353
#define	HPET_VENDID_AMD2	0x1022
#define	HPET_VENDID_HYGON	0x1d94
#define	HPET_VENDID_INTEL	0x8086
#define	HPET_VENDID_NVIDIA	0x10de
#define	HPET_VENDID_SW		0x1166

ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

static devclass_t hpet_devclass;

/* ACPI CA debugging */
#define	_COMPONENT	ACPI_TIMER
ACPI_MODULE_NAME("HPET")

struct hpet_softc {
	device_t dev;
	int mem_rid;
	int intr_rid;
	int irq;
	int useirq;
	int legacy_route;
	int per_cpu;
	uint32_t allowed_irqs;
	struct resource *mem_res;
	struct resource *intr_res;
	void *intr_handle;
	ACPI_HANDLE handle;
	uint32_t acpi_uid;
	uint64_t freq;
	uint32_t caps;
	struct timecounter tc;
	struct hpet_timer {
		struct eventtimer et;
		struct hpet_softc *sc;
		int num;
		int mode;
#define	TIMER_STOPPED	0
#define	TIMER_PERIODIC	1
#define	TIMER_ONESHOT	2
		int intr_rid;
		int irq;
		int pcpu_cpu;
		int pcpu_misrouted;
		int pcpu_master;
		int pcpu_slaves[MAXCPU];
		struct resource *intr_res;
		void *intr_handle;
		uint32_t caps;
		uint32_t vectors;
		uint32_t div;
		uint32_t next;
		char name[8];
	} t[32];
	int num_timers;
	struct cdev *pdev;
	int mmap_allow;
	int mmap_allow_write;
};
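
/*
 * Character-device interface: each unit is exposed as /dev/hpetN so userland
 * can mmap(2) the register page and read the main counter without a syscall
 * (gated by the mmap_allow/mmap_allow_write knobs set up in hpet_attach()).
 * Illustrative userland usage, sketch only (error handling omitted, not part
 * of this driver):
 *
 *	int fd = open("/dev/hpet0", O_RDONLY);
 *	volatile uint32_t *regs = mmap(NULL, PAGE_SIZE, PROT_READ,
 *	    MAP_SHARED, fd, 0);
 *	uint32_t count = regs[0xf0 / 4];	// main counter register
 */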
static d_open_t hpet_open;
static d_mmap_t hpet_mmap;

static struct cdevsw hpet_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "hpet",
	.d_open = hpet_open,
	.d_mmap = hpet_mmap,
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;

static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}

uint32_t
hpet_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th->th_x86_shift = 0;
	vdso_th->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (sc->mmap_allow != 0);
}

#ifdef COMPAT_FREEBSD32
uint32_t
hpet_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th32->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th32->th_x86_shift = 0;
	vdso_th32->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (sc->mmap_allow != 0);
}
#endif

static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		val |= HPET_CNF_LEG_RT;
	else
		val &= ~HPET_CNF_LEG_RT;
	val |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	val &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;
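
	/*
	 * For timers grouped for per-CPU operation, et_priv points at the
	 * group master; operate on the slave timer assigned to this CPU.
	 */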
	t = (mt->pcpu_master < 0) ? mt :
	    &sc->t[mt->pcpu_slaves[curcpu]];
	if (period != 0) {
		t->mode = TIMER_PERIODIC;
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = TIMER_ONESHOT;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == TIMER_PERIODIC && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}

static int
hpet_stop(struct eventtimer *et)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;

	t = (mt->pcpu_master < 0) ? mt :
	    &sc->t[mt->pcpu_slaves[curcpu]];
	t->mode = TIMER_STOPPED;
	t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
	bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	return (0);
}

static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == TIMER_STOPPED)
		return (FILTER_STRAY);
	/* Check that the per-CPU timer interrupt reached the right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload the timer, hoping the next attempt will be luckier
		 * (the system will manage proper interrupt binding).
		 */
		if ((t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == TIMER_ONESHOT) {
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
	if (t->mode == TIMER_PERIODIC &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == TIMER_ONESHOT)
		t->mode = TIMER_STOPPED;
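	/*
	 * Deliver the event through the group master, which owns the
	 * registered eventtimer for its per-CPU slave timers.
	 */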
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}

static int
hpet_intr(void *arg)
{
	struct hpet_softc *sc = (struct hpet_softc *)arg;
	int i;
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_ISR);
	if (val) {
		bus_write_4(sc->mem_res, HPET_ISR, val);
		val &= sc->useirq;
		for (i = 0; i < sc->num_timers; i++) {
			if ((val & (1 << i)) == 0)
				continue;
			hpet_intr_single(&sc->t[i]);
		}
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}

uint32_t
hpet_get_uid(device_t dev)
{
	struct hpet_softc *sc;

	sc = device_get_softc(dev);
	return (sc->acpi_uid);
}

static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char **ids;
	uint32_t id = (uint32_t)(uintptr_t)context;
	uint32_t uid = 0;

	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}

/*
 * Find an existing IRQ resource that matches the requested IRQ range
 * and return its RID.  If one is not found, use a new RID.
 */
static int
hpet_find_irq_rid(device_t dev, u_long start, u_long end)
{
	rman_res_t irq;
	int error, rid;

	for (rid = 0;; rid++) {
		error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
		if (error != 0 || (start <= irq && irq <= end))
			return (rid);
	}
}

static int
hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (!sc->mmap_allow)
		return (EPERM);
	else
		return (0);
}

static int
hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (offset >= rman_get_size(sc->mem_res))
		return (EINVAL);
	if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
		return (EPERM);
	*paddr = rman_get_start(sc->mem_res) + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;

	return (0);
}

/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS status;
	device_t child;
	int i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(hpet_devclass, 0))
		return;
	for (i = 1; ; i++) {
		/* Search for an HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for an HPET device with the same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found, let it be probed in the normal way. */
		if (child) {
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not, create it from the table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}

static int
hpet_probe(device_t dev)
{
	int rv;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	if (acpi_get_handle(dev) != NULL)
		rv = ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids, NULL);
	else
		rv = 0;
	if (rv <= 0)
		device_set_desc(dev, "High Precision Event Timer");
	return (rv);
}

static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	struct make_dev_args mda;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master, error;
	rman_res_t hpet_region_size;
	static int maxhpetet = 0;
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	hpet_region_size = rman_get_size(sc->mem_res);
	/* Validate that the region is big enough for the control registers. */
	if (hpet_region_size < HPET_MEM_MIN_WIDTH) {
		device_printf(dev, "memory region width %jd too small\n",
		    hpet_region_size);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Be sure the timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->freq = (1000000000000000LL + val / 2) / val;
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates the IA-PC HPET (High Precision Event Timers)
	 * Specification and provides an off-by-one number of
	 * timers/comparators.
	 * Additionally, they use an unregistered value in the VENDOR_ID field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	/*
	 * Now validate that the region is big enough to address all counters.
	 */
	if (hpet_region_size < HPET_TIMER_CAP_CNF(num_timers)) {
		device_printf(dev,
		    "memory region width %jd too small for %d timers\n",
		    hpet_region_size, num_timers);
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = TIMER_STOPPED;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments.  Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce the first HPET as a timecounter. */
	if (device_get_unit(dev) == 0) {
		sc->tc.tc_get_timecount = hpet_get_timecount,
		sc->tc.tc_counter_mask = ~0u,
		sc->tc.tc_name = "HPET",
		sc->tc.tc_quality = 950,
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		sc->tc.tc_fill_vdso_timehands = hpet_vdso_timehands;
#ifdef COMPAT_FREEBSD32
		sc->tc.tc_fill_vdso_timehands32 = hpet_vdso_timehands32;
#endif
		tc_init(&sc->tc);
	}
	/* If not disabled, set up and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check which IRQs we want to use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
	 * Lower IRQs also do not always work, for various reasons.
	 * SB800 fixed that, but does not seem to implement level triggering
	 * properly, which makes it very unreliable: it freezes after any
	 * interrupt loss.  Avoid legacy IRQs on AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2 ||
	    vendor == HPET_VENDID_HYGON)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have a number of unexplained interrupt
	 * problems.  For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * The ServerWorks HT1000 is reported to have problems with IRQs >= 16.
	 * Lower IRQs work, but the allowed mask is not set correctly.
	 * Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route.  Legacy_route mode works on both.
	 */
	if (vm_guest)
		sc->allowed_irqs = 0x00000000;
	/* Let the user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "allowed_irqs", &sc->allowed_irqs);

	/* Get how many per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
	cvectors = sc->allowed_irqs & 0xffff0000;
	dvectors = sc->allowed_irqs & 0x0000ffff;
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs a legacy IRQ, set it up. */
	if (sc->useirq) {
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind the IRQ to the BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* Legacy route doesn't need more configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers without a configured IRQ. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the reset. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}
	acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid);

	make_dev_args_init(&mda);
	mda.mda_devsw = &hpet_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0644;
	mda.mda_si_drv1 = sc;
	error = make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev));
	if (error == 0) {
		sc->mmap_allow = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
		    &sc->mmap_allow);
		sc->mmap_allow_write = 0;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
		    &sc->mmap_allow_write);
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow",
		    CTLFLAG_RW, &sc->mmap_allow, 0,
		    "Allow userland to memory map HPET");
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow_write",
		    CTLFLAG_RW, &sc->mmap_allow_write, 0,
		    "Allow userland write to the HPET register space");
	} else {
		device_printf(dev, "could not create /dev/hpet%d, error %d\n",
		    device_get_unit(dev), error);
	}

	return (0);
}

static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}

static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend.  The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}

static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running on suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
#ifdef DEV_APIC
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == TIMER_STOPPED)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) != 0) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}

/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}

#ifdef DEV_APIC
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc);	/* Stop timer to avoid interrupt loss. */
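		/*
		 * Reprogram the comparator's FSB (MSI) address/data pair to
		 * the newly routed interrupt before re-enabling the HPET.
		 */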
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif

static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hpet_identify),
	DEVMETHOD(device_probe, hpet_probe),
	DEVMETHOD(device_attach, hpet_attach),
	DEVMETHOD(device_detach, hpet_detach),
	DEVMETHOD(device_suspend, hpet_suspend),
	DEVMETHOD(device_resume, hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr, hpet_remap_intr),
#endif

	DEVMETHOD_END
};

static driver_t hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);