/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"

#if defined(__amd64__)
#define DEV_APIC
#else
#include "opt_apic.h"
#endif
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/vdso.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_hpet.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#endif

#define	HPET_VENDID_AMD		0x4353
#define	HPET_VENDID_AMD2	0x1022
#define	HPET_VENDID_HYGON	0x1d94
#define	HPET_VENDID_INTEL	0x8086
#define	HPET_VENDID_NVIDIA	0x10de
#define	HPET_VENDID_SW		0x1166

ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

/* ACPI CA debugging */
#define	_COMPONENT	ACPI_TIMER
ACPI_MODULE_NAME("HPET")

struct hpet_softc {
	device_t	dev;
	int		mem_rid;
	int		intr_rid;
	int		irq;
	int		useirq;
	int		legacy_route;
	int		per_cpu;
	uint32_t	allowed_irqs;
	struct resource	*mem_res;
	struct resource	*intr_res;
	void		*intr_handle;
	ACPI_HANDLE	handle;
	uint32_t	acpi_uid;
	uint64_t	freq;
	uint32_t	caps;
	struct timecounter tc;
	struct hpet_timer {
		struct eventtimer et;
		struct hpet_softc *sc;
		int	num;
		int	mode;
#define	TIMER_STOPPED	0
#define	TIMER_PERIODIC	1
#define	TIMER_ONESHOT	2
		int	intr_rid;
		int	irq;
		int	pcpu_cpu;
		int	pcpu_misrouted;
		int	pcpu_master;
		int	pcpu_slaves[MAXCPU];
		struct resource	*intr_res;
		void	*intr_handle;
		uint32_t caps;
		uint32_t vectors;
		uint32_t div;
		uint32_t next;
		char	name[8];
	} t[32];
	int		num_timers;
	struct cdev	*pdev;
	int		mmap_allow;
	int		mmap_allow_write;
};

static d_open_t hpet_open;
static d_mmap_t hpet_mmap;

static struct cdevsw hpet_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"hpet",
	.d_open =	hpet_open,
	.d_mmap =	hpet_mmap,
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;

static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}

uint32_t
hpet_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th->th_x86_shift = 0;
	vdso_th->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (sc->mmap_allow != 0);
}

#ifdef COMPAT_FREEBSD32
uint32_t
hpet_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th32->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th32->th_x86_shift = 0;
	vdso_th32->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th32->th_x86_pvc_last_systime[0] = 0;
	vdso_th32->th_x86_pvc_last_systime[1] = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (sc->mmap_allow != 0);
}
#endif

static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		val |= HPET_CNF_LEG_RT;
	else
		val &= ~HPET_CNF_LEG_RT;
	val |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	val &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;

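	/* For per-CPU event timers, operate on this CPU's slave timer. */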
	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	if (period != 0) {
		t->mode = TIMER_PERIODIC;
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = TIMER_ONESHOT;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == TIMER_PERIODIC && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}

static int
hpet_stop(struct eventtimer *et)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	t->mode = TIMER_STOPPED;
	t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
	bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	return (0);
}

static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == TIMER_STOPPED)
		return (FILTER_STRAY);
	/* Check that the per-CPU timer interrupt reached the right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload the timer, hoping that we are luckier next time
		 * (the system will manage proper interrupt binding).
		 */
		if ((t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == TIMER_ONESHOT) {
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
	if (t->mode == TIMER_PERIODIC &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == TIMER_ONESHOT)
		t->mode = TIMER_STOPPED;
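	/* Deliver the event through the master timer's registered callback. */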
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}

static int
hpet_intr(void *arg)
{
	struct hpet_softc *sc = (struct hpet_softc *)arg;
	int i;
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_ISR);
	if (val) {
		bus_write_4(sc->mem_res, HPET_ISR, val);
		val &= sc->useirq;
		for (i = 0; i < sc->num_timers; i++) {
			if ((val & (1 << i)) == 0)
				continue;
			hpet_intr_single(&sc->t[i]);
		}
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}

uint32_t
hpet_get_uid(device_t dev)
{
	struct hpet_softc *sc;

	sc = device_get_softc(dev);
	return (sc->acpi_uid);
}

static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char **ids;
	uint32_t id = (uint32_t)(uintptr_t)context;
	uint32_t uid = 0;

	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}

/*
 * Find an existing IRQ resource that matches the requested IRQ range
 * and return its RID.  If one is not found, use a new RID.
 */
static int
hpet_find_irq_rid(device_t dev, u_long start, u_long end)
{
	rman_res_t irq;
	int error, rid;

	for (rid = 0;; rid++) {
		error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
		if (error != 0 || (start <= irq && irq <= end))
			return (rid);
	}
}

static int
hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (!sc->mmap_allow)
		return (EPERM);
	else
		return (0);
}

static int
hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (offset >= rman_get_size(sc->mem_res))
		return (EINVAL);
	if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
		return (EPERM);
	*paddr = rman_get_start(sc->mem_res) + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;

	return (0);
}

/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS status;
	device_t child;
	int i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(devclass_find("hpet"), 0))
		return;
	for (i = 1; ; i++) {
		/* Search for the HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for an HPET device with the same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found, let it be probed in the normal way. */
		if (child) {
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not, create it from the table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}

static int
hpet_probe(device_t dev)
{
	int rv;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	if (acpi_get_handle(dev) != NULL)
		rv = ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids, NULL);
	else
		rv = 0;
	if (rv <= 0)
		device_set_desc(dev, "High Precision Event Timer");
	return (rv);
}

static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	struct make_dev_args mda;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master, error;
	rman_res_t hpet_region_size;
	static int maxhpetet = 0;
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	hpet_region_size = rman_get_size(sc->mem_res);
	/* Validate that the region is big enough for the control registers. */
	if (hpet_region_size < HPET_MEM_MIN_WIDTH) {
		device_printf(dev, "memory region width %jd too small\n",
		    hpet_region_size);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Be sure the timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* The period is reported in femtoseconds; convert it to Hz, rounding. */
	sc->freq = (1000000000000000LL + val / 2) / val;
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates the IA-PC HPET (High Precision Event Timers)
	 * Specification and provides an off-by-one number
	 * of timers/comparators.
	 * Additionally, they use an unregistered value in the VENDOR_ID field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	/*
	 * Now validate that the region is big enough to address all counters.
	 */
	if (hpet_region_size < HPET_TIMER_CAP_CNF(num_timers)) {
		device_printf(dev,
		    "memory region width %jd too small for %d timers\n",
		    hpet_region_size, num_timers);
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = TIMER_STOPPED;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments.  Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce the first HPET as a timecounter. */
	if (device_get_unit(dev) == 0) {
		sc->tc.tc_get_timecount = hpet_get_timecount;
		sc->tc.tc_counter_mask = ~0u;
		sc->tc.tc_name = "HPET";
		sc->tc.tc_quality = 950;
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		sc->tc.tc_fill_vdso_timehands = hpet_vdso_timehands;
#ifdef COMPAT_FREEBSD32
		sc->tc.tc_fill_vdso_timehands32 = hpet_vdso_timehands32;
#endif
		tc_init(&sc->tc);
	}
	/* If not disabled, set up and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check which IRQs we want to use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
	 * Lower IRQs also do not always work, for various reasons.
	 * SB800 fixed that, but does not seem to implement level triggering
	 * properly, which makes it very unreliable: it freezes after any
	 * interrupt loss.  Avoid legacy IRQs for AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2 ||
	    vendor == HPET_VENDID_HYGON)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have a number of unexplained interrupt
	 * problems.  For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * The ServerWorks HT1000 is reported to have problems with IRQs >= 16.
	 * Lower IRQs work, but the allowed mask is not set correctly.
	 * Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route.  Legacy_route mode works on both.
	 */
	if (vm_guest)
		sc->allowed_irqs = 0x00000000;
	/* Let the user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "allowed_irqs", &sc->allowed_irqs);

	/* Get how many per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
	cvectors = sc->allowed_irqs & 0xffff0000;
	dvectors = sc->allowed_irqs & 0x0000ffff;
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs a legacy IRQ, set it up. */
	if (sc->useirq) {
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind IRQ to BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* Legacy route doesn't need more configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers without an IRQ set up. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the event timer. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}
	acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid);

	make_dev_args_init(&mda);
	mda.mda_devsw = &hpet_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0644;
	mda.mda_si_drv1 = sc;
	error = make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev));
	if (error == 0) {
		sc->mmap_allow = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
		    &sc->mmap_allow);
		sc->mmap_allow_write = 0;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
		    &sc->mmap_allow_write);
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow",
		    CTLFLAG_RW, &sc->mmap_allow, 0,
		    "Allow userland to memory map HPET");
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow_write",
		    CTLFLAG_RW, &sc->mmap_allow_write, 0,
		    "Allow userland write to the HPET register space");
	} else {
		device_printf(dev, "could not create /dev/hpet%d, error %d\n",
		    device_get_unit(dev), error);
	}

	return (0);
}

static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}

static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend.  The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}

static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running on suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
#ifdef DEV_APIC
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == TIMER_STOPPED)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) != 0) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}

/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}

#ifdef DEV_APIC
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif

static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hpet_identify),
	DEVMETHOD(device_probe, hpet_probe),
	DEVMETHOD(device_attach, hpet_attach),
	DEVMETHOD(device_detach, hpet_detach),
	DEVMETHOD(device_suspend, hpet_suspend),
	DEVMETHOD(device_resume, hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr, hpet_remap_intr),
#endif

	DEVMETHOD_END
};

static driver_t hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

DRIVER_MODULE(hpet, acpi, hpet_driver, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);