/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#if defined(__amd64__)
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/time.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_hpet.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#endif

#define	HPET_VENDID_AMD		0x4353
#define	HPET_VENDID_AMD2	0x1022
#define	HPET_VENDID_INTEL	0x8086
#define	HPET_VENDID_NVIDIA	0x10de
#define	HPET_VENDID_SW		0x1166

ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

static devclass_t hpet_devclass;

/* ACPI CA debugging */
#define	_COMPONENT	ACPI_TIMER
ACPI_MODULE_NAME("HPET")

struct hpet_softc {
	device_t		dev;
	int			mem_rid;
	int			intr_rid;
	int			irq;
	int			useirq;
	int			legacy_route;
	int			per_cpu;
	uint32_t		allowed_irqs;
	struct resource		*mem_res;
	struct resource		*intr_res;
	void			*intr_handle;
	ACPI_HANDLE		handle;
	uint64_t		freq;
	uint32_t		caps;
	struct timecounter	tc;
	struct hpet_timer {
		struct eventtimer	et;
		struct hpet_softc	*sc;
		int			num;
		int			mode;
		int			intr_rid;
		int			irq;
		int			pcpu_cpu;
		int			pcpu_misrouted;
		int			pcpu_master;
		int			pcpu_slaves[MAXCPU];
		struct resource		*intr_res;
		void			*intr_handle;
		uint32_t		caps;
		uint32_t		vectors;
		uint32_t		div;
		uint32_t		next;
		char			name[8];
	}			t[32];
	int			num_timers;
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;
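/*
 * Timecounter read method: return the low 32 bits of the HPET main
 * counter register.
 */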
static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}

static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		val |= HPET_CNF_LEG_RT;
	else
		val &= ~HPET_CNF_LEG_RT;
	val |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	val &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	if (period != 0) {
		t->mode = 1;
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = 2;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}

static int
hpet_stop(struct eventtimer *et)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	t->mode = 0;
	t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
	bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	return (0);
}

static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == 0)
		return (FILTER_STRAY);
	/* Check that the per-CPU timer interrupt reached the right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload the timer, hoping that the next interrupt will be
		 * luckier (the system will manage proper interrupt binding).
		 */
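		/*
		 * Hardware-periodic timers will fire again on their own;
		 * software-periodic and one-shot timers need the comparator
		 * pushed ahead (here by 1/8 s) so another interrupt is
		 * generated.
		 */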
		if ((t->mode == 1 && (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == 2) {
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
	if (t->mode == 1 &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == 2)
		t->mode = 0;
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}

static int
hpet_intr(void *arg)
{
	struct hpet_softc *sc = (struct hpet_softc *)arg;
	int i;
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_ISR);
	if (val) {
		bus_write_4(sc->mem_res, HPET_ISR, val);
		val &= sc->useirq;
		for (i = 0; i < sc->num_timers; i++) {
			if ((val & (1 << i)) == 0)
				continue;
			hpet_intr_single(&sc->t[i]);
		}
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}

static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char **ids;
	uint32_t id = (uint32_t)(uintptr_t)context;
	uint32_t uid = 0;

	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}

/*
 * Find an existing IRQ resource that matches the requested IRQ range
 * and return its RID.  If one is not found, use a new RID.
 */
static int
hpet_find_irq_rid(device_t dev, u_long start, u_long end)
{
	u_long irq;
	int error, rid;

	for (rid = 0;; rid++) {
		error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
		if (error != 0 || (start <= irq && irq <= end))
			return (rid);
	}
}

/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS status;
	device_t child;
	int i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(hpet_devclass, 0))
		return;
	for (i = 1; ; i++) {
		/* Search for an HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for an HPET device with the same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found, let it be probed in the normal way. */
		if (child) {
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not, create one from the table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}

static int
hpet_probe(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	if (acpi_get_handle(dev) != NULL &&
	    ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids) == NULL)
		return (ENXIO);

	device_set_desc(dev, "High Precision Event Timer");
	return (0);
}

static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master;
	static int maxhpetet = 0;
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	/* Validate that we can access the whole region. */
	if (rman_get_size(sc->mem_res) < HPET_MEM_WIDTH) {
		device_printf(dev, "memory region width %ld too small\n",
		    rman_get_size(sc->mem_res));
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Make sure the timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->freq = (1000000000000000LL + val / 2) / val;
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates the IA-PC HPET (High Precision Event Timers)
	 * Specification and provides an off-by-one number of
	 * timers/comparators.
	 * Additionally, they use an unregistered value in the VENDOR_ID field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = 0;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments.  Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce the first HPET as a timecounter. */
	if (device_get_unit(dev) == 0) {
		sc->tc.tc_get_timecount = hpet_get_timecount,
		sc->tc.tc_counter_mask = ~0u,
		sc->tc.tc_name = "HPET",
		sc->tc.tc_quality = 950,
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		tc_init(&sc->tc);
	}
	/* If not disabled, set up and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check which IRQs we want to use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
	 * Lower IRQs also do not always work, for various reasons.
	 * SB800 fixed that, but does not seem to implement level triggering
	 * properly, which makes it very unreliable - it freezes after any
	 * interrupt loss.  Avoid legacy IRQs for AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have a number of unexplained interrupt
	 * problems.  For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * The ServerWorks HT1000 is reported to have problems with IRQs >= 16.
	 * Lower IRQs are working, but the allowed mask is not set correctly.
	 * Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route.  Legacy_route mode works on both.
	 */
	if (vm_guest)
		sc->allowed_irqs = 0x00000000;
	/* Let the user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "allowed_irqs", &sc->allowed_irqs);

	/* Get how many per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
	cvectors = sc->allowed_irqs & 0xffff0000;
	dvectors = sc->allowed_irqs & 0x0000ffff;
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
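		/*
		 * In legacy replacement mode, timer 0 takes over the 8254
		 * timer interrupt (IRQ 0) and timer 1 the RTC interrupt
		 * (IRQ 8).
		 */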
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs a legacy IRQ, set it up. */
	if (sc->useirq) {
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind IRQ to BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
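		/*
		 * Choose the interrupt delivery method for this timer:
		 * the legacy replacement route, FSB (MSI) delivery, a
		 * dedicated I/O APIC interrupt, or the shared legacy IRQ.
		 */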
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* The legacy route needs no further configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers without an IRQ set up. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the event timer. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}
	return (0);
}

static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}

static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend.  The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}

static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running before suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
#ifdef DEV_APIC
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == 0)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}

/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}

#ifdef DEV_APIC
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc);	/* Stop timer to avoid interrupt loss. */
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif

static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hpet_identify),
	DEVMETHOD(device_probe,		hpet_probe),
	DEVMETHOD(device_attach,	hpet_attach),
	DEVMETHOD(device_detach,	hpet_detach),
	DEVMETHOD(device_suspend,	hpet_suspend),
	DEVMETHOD(device_resume,	hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr,	hpet_remap_intr),
#endif

	DEVMETHOD_END
};

static driver_t hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);