// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/hpet.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/cpuid/api.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
#include <asm/mwait.h>
#include <asm/msr.h>

#undef pr_fmt
#define pr_fmt(fmt) "hpet: " fmt

/* Usage states of an HPET comparator channel */
enum hpet_mode {
	HPET_MODE_UNUSED,	/* Not claimed by anyone */
	HPET_MODE_LEGACY,	/* Legacy replacement (tick / RTC emulation) */
	HPET_MODE_CLOCKEVT,	/* Per-CPU MSI clockevent */
	HPET_MODE_DEVICE,	/* Handed to the /dev/hpet char driver */
};

/*
 * State of one HPET comparator channel.
 */
struct hpet_channel {
	struct clock_event_device	evt;		/* Clockevent device backed by this channel */
	unsigned int			num;		/* Zero-based channel (comparator) number */
	unsigned int			cpu;		/* CPU this channel's clockevent serves */
	unsigned int			irq;		/* Linux interrupt number */
	unsigned int			in_use;		/* Claimed by a CPU (MSI clockevent path) */
	enum hpet_mode			mode;
	unsigned int			boot_cfg;	/* Tn_CFG value captured at boot for restore */
	char				name[10];	/* Clockevent name, e.g. "hpet3" */
};

/*
 * Global state of the HPET block.
 */
struct hpet_base {
	unsigned int			nr_channels;	/* Total comparator channels present */
	unsigned int			nr_clockevents;	/* Channels reserved as per-CPU clockevents */
	unsigned int			boot_cfg;	/* Global CFG value captured at boot */
	struct hpet_channel		*channels;	/* Array of nr_channels entries */
};

#define HPET_MASK			CLOCKSOURCE_MASK(32)

#define HPET_MIN_CYCLES			128
#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long				hpet_address;
u8					hpet_blockid; /* OS timer block num */
bool					hpet_msi_disable;

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)
static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
static struct irq_domain		*hpet_domain;
#endif

static void __iomem			*hpet_virt_address;

static struct hpet_base			hpet_base;

static bool				hpet_legacy_int_enabled;
static unsigned long			hpet_freq;

bool					boot_hpet_disable;
bool					hpet_force_user;
static bool				hpet_verbose;

/* Map a clockevent device back to its containing channel */
static inline
struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt)
{
	return container_of(evt, struct hpet_channel, evt);
}

/* MMIO read of an HPET register at offset @a */
inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

/* MMIO write of @d to the HPET register at offset @a */
static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE);
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 *
 * Parses "hpet=" as a comma separated list of "disable", "force" and
 * "verbose" tokens.
 */
static int __init hpet_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = true;
		if (!strncmp("force", str, 5))
			hpet_force_user = true;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = true;
		str = next;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = true;
	return 1;
}
__setup("nohpet", disable_hpet);

/* HPET is usable when it was discovered (ACPI) and not disabled on cmdline */
static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/**
 * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

/* Dump the complete HPET register set for debugging (hpet=verbose) */
static void _hpet_print_config(const char *function, int line)
{
	u32 i, id, period, cfg, status, channels, l, h;

	pr_info("%s(%d):\n", function, line);

	id = hpet_readl(HPET_ID);
	period = hpet_readl(HPET_PERIOD);
	pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period);

	cfg = hpet_readl(HPET_CFG);
	status = hpet_readl(HPET_STATUS);
	pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status);

	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	for (i = 0; i < channels; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h);

		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h);

		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);		\
} while (0)

/*
 * When the HPET driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

/* Hand the HPET over to the /dev/hpet char driver, keeping kernel-owned
 * channels (legacy tick / clockevents) reserved. */
static void __init hpet_reserve_platform_timers(void)
{
	struct hpet_data hd;
	unsigned int i;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet_virt_address;
	hd.hd_nirqs = hpet_base.nr_channels;

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ. Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_channel *hc = hpet_base.channels + i;

		if (i >= 2)
			hd.hd_irq[i] = hc->irq;

		switch (hc->mode) {
		case HPET_MODE_UNUSED:
		case HPET_MODE_DEVICE:
			hc->mode = HPET_MODE_DEVICE;
			break;
		case HPET_MODE_CLOCKEVT:
		case HPET_MODE_LEGACY:
			hpet_reserve_timer(&hd, hc->num);
			break;
		}
	}

	hpet_alloc(&hd);
}

static void __init hpet_select_device_channel(void)
{
	int i;

	for (i = 0; i < hpet_base.nr_channels; i++) {
		struct hpet_channel *hc = hpet_base.channels + i;

		/* Associate the first unused channel to /dev/hpet */
		if (hc->mode == HPET_MODE_UNUSED) {
			hc->mode = HPET_MODE_DEVICE;
			return;
		}
	}
}

#else
static inline void hpet_reserve_platform_timers(void) { }
static inline void hpet_select_device_channel(void) {}
#endif

/* Common HPET functions */

/* Clear the global enable bit, which stops the main counter */
static void hpet_stop_counter(void)
{
	u32 cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

/* Reset the 64-bit main counter to zero (counter must be stopped) */
static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

/* Clocksource resume callback: re-enable a force-enabled HPET and restart */
static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

/* Route channels 0/1 to the legacy 8254/RTC interrupt lines */
static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, 
HPET_CFG);
	hpet_legacy_int_enabled = true;
}

/* Program @evt's channel for periodic mode at HZ tick rate */
static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt)
{
	unsigned int channel = clockevent_to_channel(evt)->num;
	unsigned int cfg, cmp, now;
	uint64_t delta;

	hpet_stop_counter();
	delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
	delta >>= evt->shift;
	now = hpet_readl(HPET_COUNTER);
	cmp = now + (unsigned int)delta;
	cfg = hpet_readl(HPET_Tn_CFG(channel));
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
	       HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(channel));
	hpet_writel(cmp, HPET_Tn_CMP(channel));
	udelay(1);
	/*
	 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
	 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
	 * bit is automatically cleared after the first write.
	 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
	 * Publication # 24674)
	 */
	hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel));
	hpet_start_counter();
	hpet_print_config();

	return 0;
}

static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt)
{
	unsigned int channel = clockevent_to_channel(evt)->num;
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(channel));
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(channel));

	return 0;
}

static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt)
{
	unsigned int channel = clockevent_to_channel(evt)->num;
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(channel));
	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_Tn_CFG(channel));

	return 0;
}

/* Resume callback for the legacy channel: re-enable legacy routing */
static int hpet_clkevt_legacy_resume(struct clock_event_device *evt)
{
	hpet_enable_legacy_int();
	hpet_print_config();
	return 0;
}

static int
hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned int channel = clockevent_to_channel(evt)->num;
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(channel));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on a equal comparison and neither provides a less
	 * than or equal functionality (which would require to take
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}

/* Common clockevent initialization for the legacy and the MSI channels */
static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
{
	struct clock_event_device *evt = &hc->evt;

	evt->rating = rating;
	evt->irq = hc->irq;
	evt->name = hc->name;
	evt->cpumask = cpumask_of(hc->cpu);
	evt->set_state_oneshot = hpet_clkevt_set_state_oneshot;
	evt->set_next_event = hpet_clkevt_set_next_event;
	evt->set_state_shutdown = hpet_clkevt_set_state_shutdown;

	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hc->boot_cfg & HPET_TN_PERIODIC) {
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
		evt->set_state_periodic = hpet_clkevt_set_state_periodic;
	}
}

static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
{
	/*
	 * Start HPET with the boot CPU's cpumask and make it global after
	 * the IO_APIC has been initialized.
	 */
	hc->cpu = boot_cpu_data.cpu_index;
	strscpy(hc->name, "hpet", sizeof(hc->name));
	hpet_init_clockevent(hc, 50);

	hc->evt.tick_resume = hpet_clkevt_legacy_resume;

	/*
	 * Legacy horrors and sins from the past. HPET used periodic mode
	 * unconditionally forever on the legacy channel 0. Removing the
	 * below hack and using the conditional in hpet_init_clockevent()
	 * makes at least Qemu and one hardware machine fail to boot.
	 * There are two issues which cause the boot failure:
	 *
	 * #1 After the timer delivery test in IOAPIC and the IOAPIC setup
	 *    the next interrupt is not delivered despite the HPET channel
	 *    being programmed correctly. Reprogramming the HPET after
	 *    switching to IOAPIC makes it work again. After fixing this,
	 *    the next issue surfaces:
	 *
	 * #2 Due to the unconditional periodic mode availability the Local
	 *    APIC timer calibration can hijack the global clockevents
	 *    event handler without causing damage. 
Using oneshot at this
	 *    stage makes it hang because the HPET does not get
	 *    reprogrammed due to the handler hijacking. Duh, stupid me!
	 *
	 * Both issues require major surgery and especially the kick HPET
	 * again after enabling IOAPIC results in really nasty hackery.
	 * This 'assume periodic works' magic has survived since HPET
	 * support got added, so it's questionable whether this should be
	 * fixed. Both Qemu and the failing hardware machine support
	 * periodic mode despite the fact that both don't advertise it in
	 * the configuration register and both need that extra kick after
	 * switching to IOAPIC. Seems to be a feature...
	 */
	hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC;
	hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic;

	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	clockevents_config_and_register(&hc->evt, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hc->evt;
	pr_debug("Clockevent registered\n");
}

/*
 * HPET MSI Support
 */
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ)

/* Enable the channel interrupt and switch it to FSB (MSI) delivery */
static void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(hc->num));
	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hc->num));
}

static void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
	unsigned int cfg;

	cfg = hpet_readl(HPET_Tn_CFG(hc->num));
	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(cfg, HPET_Tn_CFG(hc->num));
}

/* Store the MSI message in the channel's FSB route registers */
static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
}

static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
}

static struct irq_chip hpet_msi_controller __ro_after_init = {
	.name = "HPET-MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
	.irq_ack = irq_chip_ack_parent,
	.irq_set_affinity = msi_domain_set_affinity,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_write_msi_msg = hpet_msi_write_msg,
	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int hpet_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
			    handle_edge_irq, arg->data, "edge");

	return 0;
}

static struct msi_domain_ops hpet_msi_domain_ops = {
	.msi_init	= hpet_msi_init,
};

static struct msi_domain_info hpet_msi_domain_info = {
	.ops		= &hpet_msi_domain_ops,
	.chip		= &hpet_msi_controller,
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS,
};

/* Create the MSI irq domain for HPET block @hpet_id, parented on the
 * vector domain or - if interrupt remapping is active - the remap domain. */
static struct irq_domain *hpet_create_irq_domain(int hpet_id)
{
	struct msi_domain_info *domain_info;
	struct irq_domain *parent, *d;
	struct fwnode_handle *fn;
	struct irq_fwspec fwspec;

	if (x86_vector_domain == NULL)
		return NULL;

	domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
	if (!domain_info)
		return NULL;

	*domain_info = hpet_msi_domain_info;
	domain_info->data = (void *)(long)hpet_id;

	fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
					      hpet_id);
	if (!fn) {
		kfree(domain_info);
		return NULL;
	}

	fwspec.fwnode = fn;
	fwspec.param_count = 1;
	fwspec.param[0] = hpet_id;

	parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_GENERIC_MSI);
	if (!parent) {
		irq_domain_free_fwnode(fn);
		kfree(domain_info);
		return NULL;
	}
	if (parent != x86_vector_domain)
		hpet_msi_controller.name = "IR-HPET-MSI";

	d = msi_create_irq_domain(fn, domain_info, parent);
	if (!d) {
		irq_domain_free_fwnode(fn);
		kfree(domain_info);
	}
	return d;
}

/* Retrieve the HPET block id stashed in the domain info */
static inline int hpet_dev_id(struct irq_domain *domain)
{
	struct msi_domain_info *info = msi_get_domain_info(domain);

	return (int)(long)info->data;
}

static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
			   int dev_num)
{
	struct irq_alloc_info info;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_HPET;
	info.data = hc;
	info.devid = hpet_dev_id(domain);
	info.hwirq = dev_num;

	return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
}

static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
{
	struct hpet_channel *hc = clockevent_to_channel(evt);
	struct irq_data *data = irq_get_irq_data(hc->irq);
	struct msi_msg msg;

	/* Restore the MSI msg and unmask the interrupt */
	irq_chip_compose_msi_msg(data, &msg);
	hpet_msi_write(hc, &msg);
	hpet_msi_unmask(data);
	return 0;
}

static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
{
	struct hpet_channel *hc = data;
	struct clock_event_device *evt = &hc->evt;

	if (!evt->event_handler) {
		pr_info("Spurious interrupt HPET channel %d\n", hc->num);
		return IRQ_HANDLED;
	}

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* Request the channel's irq and pin it to the channel's target CPU */
static int hpet_setup_msi_irq(struct hpet_channel *hc)
{
	if (request_irq(hc->irq, hpet_msi_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			hc->name, hc))
		return -1;

	disable_irq(hc->irq);
	irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
	enable_irq(hc->irq);

	pr_debug("%s irq %u for MSI\n", hc->name, hc->irq);

	return 0;
}

/* Invoked from the hotplug callback on @cpu */
static void 
init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu) 648 { 649 struct clock_event_device *evt = &hc->evt; 650 651 hc->cpu = cpu; 652 per_cpu(cpu_hpet_channel, cpu) = hc; 653 hpet_setup_msi_irq(hc); 654 655 hpet_init_clockevent(hc, 110); 656 evt->tick_resume = hpet_clkevt_msi_resume; 657 658 clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, 659 0x7FFFFFFF); 660 } 661 662 static struct hpet_channel *hpet_get_unused_clockevent(void) 663 { 664 int i; 665 666 for (i = 0; i < hpet_base.nr_channels; i++) { 667 struct hpet_channel *hc = hpet_base.channels + i; 668 669 if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use) 670 continue; 671 hc->in_use = 1; 672 return hc; 673 } 674 return NULL; 675 } 676 677 static int hpet_cpuhp_online(unsigned int cpu) 678 { 679 struct hpet_channel *hc = hpet_get_unused_clockevent(); 680 681 if (hc) 682 init_one_hpet_msi_clockevent(hc, cpu); 683 return 0; 684 } 685 686 static int hpet_cpuhp_dead(unsigned int cpu) 687 { 688 struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu); 689 690 if (!hc) 691 return 0; 692 free_irq(hc->irq, hc); 693 hc->in_use = 0; 694 per_cpu(cpu_hpet_channel, cpu) = NULL; 695 return 0; 696 } 697 698 static void __init hpet_select_clockevents(void) 699 { 700 unsigned int i; 701 702 hpet_base.nr_clockevents = 0; 703 704 /* No point if MSI is disabled or CPU has an Always Running APIC Timer */ 705 if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT)) 706 return; 707 708 hpet_print_config(); 709 710 hpet_domain = hpet_create_irq_domain(hpet_blockid); 711 if (!hpet_domain) 712 return; 713 714 for (i = 0; i < hpet_base.nr_channels; i++) { 715 struct hpet_channel *hc = hpet_base.channels + i; 716 int irq; 717 718 if (hc->mode != HPET_MODE_UNUSED) 719 continue; 720 721 /* Only consider HPET channel with MSI support */ 722 if (!(hc->boot_cfg & HPET_TN_FSB_CAP)) 723 continue; 724 725 sprintf(hc->name, "hpet%d", i); 726 727 irq = hpet_assign_irq(hpet_domain, hc, hc->num); 728 if (irq <= 0) 729 
continue; 730 731 hc->irq = irq; 732 hc->mode = HPET_MODE_CLOCKEVT; 733 734 if (++hpet_base.nr_clockevents == num_possible_cpus()) 735 break; 736 } 737 738 pr_info("%d channels of %d reserved for per-cpu timers\n", 739 hpet_base.nr_channels, hpet_base.nr_clockevents); 740 } 741 742 #else 743 744 static inline void hpet_select_clockevents(void) { } 745 746 #define hpet_cpuhp_online NULL 747 #define hpet_cpuhp_dead NULL 748 749 #endif 750 751 /* 752 * Clock source related code 753 */ 754 #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) 755 /* 756 * Reading the HPET counter is a very slow operation. If a large number of 757 * CPUs are trying to access the HPET counter simultaneously, it can cause 758 * massive delays and slow down system performance dramatically. This may 759 * happen when HPET is the default clock source instead of TSC. For a 760 * really large system with hundreds of CPUs, the slowdown may be so 761 * severe, that it can actually crash the system because of a NMI watchdog 762 * soft lockup, for example. 763 * 764 * If multiple CPUs are trying to access the HPET counter at the same time, 765 * we don't actually need to read the counter multiple times. Instead, the 766 * other CPUs can use the counter value read by the first CPU in the group. 767 * 768 * This special feature is only enabled on x86-64 systems. It is unlikely 769 * that 32-bit x86 systems will have enough CPUs to require this feature 770 * with its associated locking overhead. We also need 64-bit atomic read. 771 * 772 * The lock and the HPET value are stored together and can be read in a 773 * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t 774 * is 32 bits in size. 
 */
union hpet_lock {
	struct {
		arch_spinlock_t lock;	/* Serializes the hardware read */
		u32 value;		/* Last counter value published by the lock holder */
	};
	u64 lockval;			/* Both fields, readable in one 64-bit load */
};

static union hpet_lock hpet __cacheline_aligned = {
	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
};

/*
 * Clocksource read callback. Only one CPU in a contending group performs
 * the expensive MMIO read; the others reuse the value it publishes.
 */
static u64 read_hpet(struct clocksource *cs)
{
	unsigned long flags;
	union hpet_lock old, new;

	BUILD_BUG_ON(sizeof(union hpet_lock) != 8);

	/*
	 * Read HPET directly if in NMI.
	 */
	if (in_nmi())
		return (u64)hpet_readl(HPET_COUNTER);

	/*
	 * Read the current state of the lock and HPET value atomically.
	 */
	old.lockval = READ_ONCE(hpet.lockval);

	if (arch_spin_is_locked(&old.lock))
		goto contended;

	local_irq_save(flags);
	if (arch_spin_trylock(&hpet.lock)) {
		new.value = hpet_readl(HPET_COUNTER);
		/*
		 * Use WRITE_ONCE() to prevent store tearing.
		 */
		WRITE_ONCE(hpet.value, new.value);
		arch_spin_unlock(&hpet.lock);
		local_irq_restore(flags);
		return (u64)new.value;
	}
	local_irq_restore(flags);

contended:
	/*
	 * Contended case
	 * --------------
	 * Wait until the HPET value change or the lock is free to indicate
	 * its value is up-to-date.
	 *
	 * It is possible that old.value has already contained the latest
	 * HPET value while the lock holder was in the process of releasing
	 * the lock. Checking for lock state change will enable us to return
	 * the value immediately instead of waiting for the next HPET reader
	 * to come along.
	 */
	do {
		cpu_relax();
		new.lockval = READ_ONCE(hpet.lockval);
	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));

	return (u64)new.value;
}
#else
/*
 * For UP or 32-bit.
 */
static u64 read_hpet(struct clocksource *cs)
{
	return (u64)hpet_readl(HPET_COUNTER);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
};

/*
 * AMD SB700 based systems with spread spectrum enabled use a SMM based
 * HPET emulation to provide proper frequency setting.
 *
 * On such systems the SMM code is initialized with the first HPET register
 * access and takes some time to complete. During this time the config
 * register reads 0xffffffff. We check for max 1000 loops whether the
 * config register reads a non-0xffffffff value to make sure that the
 * HPET is up and running before we proceed any further.
 *
 * A counting loop is safe, as the HPET access takes thousands of CPU cycles.
 *
 * On non-SB700 based machines this check is only done once and has no
 * side effects.
 */
static bool __init hpet_cfg_working(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (hpet_readl(HPET_CFG) != 0xFFFFFFFF)
			return true;
	}

	pr_warn("Config register invalid. Disabling HPET\n");
	return false;
}

/* Verify that the main counter actually advances once started */
static bool __init hpet_counting(void)
{
	u64 start, now, t1;

	hpet_restart_counter();

	t1 = hpet_readl(HPET_COUNTER);
	start = rdtsc();

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		if (t1 != hpet_readl(HPET_COUNTER))
			return true;
		now = rdtsc();
	} while ((now - start) < 200000UL);

	pr_warn("Counter not counting. 
HPET disabled\n");
	return false;
}

/* Check for MWAIT PC10 substate support (Intel only) */
static bool __init mwait_pc10_supported(void)
{
	unsigned int eax, ebx, ecx, mwait_substates;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return false;

	if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
		return false;

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);

	return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
	       (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
	       (mwait_substates & (0xF << 28));
}

/*
 * Check whether the system supports PC10. If so force disable HPET as that
 * stops counting in PC10. This check is overbroad as it does not take any
 * of the following into account:
 *
 *	- ACPI tables
 *	- Enablement of intel_idle
 *	- Command line arguments which limit intel_idle C-state support
 *
 * That's perfectly fine. HPET is a piece of hardware designed by committee
 * and the only reasons why it is still in use on modern systems is the
 * fact that it is impossible to reliably query TSC and CPU frequency via
 * CPUID or firmware.
 *
 * If HPET is functional it is useful for calibrating TSC, but this can be
 * done via PMTIMER as well which seems to be the last remaining timer on
 * X86/INTEL platforms that has not been completely wreckaged by feature
 * creep.
 *
 * In theory HPET support should be removed altogether, but there are older
 * systems out there which depend on it because TSC and APIC timer are
 * dysfunctional in deeper C-states.
 *
 * It's only 20 years now that hardware people have been asked to provide
 * reliable and discoverable facilities which can be used for timekeeping
 * and per CPU timer interrupts.
 *
 * The probability that this problem is going to be solved in the
 * foreseeable future is close to zero, so the kernel has to be cluttered
 * with heuristics to keep up with the ever growing amount of hardware and
 * firmware trainwrecks. Hopefully some day hardware people will understand
 * that the approach of "This can be fixed in software" is not sustainable.
 * Hope dies last...
 */
static bool __init hpet_is_pc10_damaged(void)
{
	unsigned long long pcfg;

	/* Check whether PC10 substates are supported */
	if (!mwait_pc10_supported())
		return false;

	/* Check whether PC10 is enabled in PKG C-state limit */
	rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
	if ((pcfg & 0xF) < 8)
		return false;

	if (hpet_force_user) {
		pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
		return false;
	}

	pr_info("HPET dysfunctional in PC10. Force disabled.\n");
	boot_hpet_disable = true;
	return true;
}

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id, irq;
	unsigned int i, channels;
	struct hpet_channel *hc;
	u64 freq;

	if (!is_hpet_capable())
		return 0;

	if (hpet_is_pc10_damaged())
		return 0;

	hpet_set_mapping();
	if (!hpet_virt_address)
		return 0;

	/* Validate that the config register is working */
	if (!hpet_cfg_working())
		goto out_nohpet;

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/* The period is a femtoseconds value. Convert it to a frequency. */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	/* This is the HPET channel number which is zero based */
	channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2)
		goto out_nohpet;

	hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
	if (!hc) {
		pr_warn("Disabling HPET.\n");
		goto out_nohpet;
	}
	hpet_base.channels = hc;
	hpet_base.nr_channels = channels;

	/* Read, store and sanitize the global configuration */
	cfg = hpet_readl(HPET_CFG);
	hpet_base.boot_cfg = cfg;
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("Global config: Unknown bits %#x\n", cfg);

	/* Read, store and sanitize the per channel configuration */
	for (i = 0; i < channels; i++, hc++) {
		hc->num = i;

		cfg = hpet_readl(HPET_Tn_CFG(i));
		hc->boot_cfg = cfg;
		irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
		hc->irq = irq;

		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));

		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg);
	}
	hpet_print_config();

	/*
	 * Validate that the counter is counting. This needs to be done
	 * after sanitizing the config registers to properly deal with
	 * force enabled HPETs.
	 */
	if (!hpet_counting())
		goto out_nohpet;

	if (tsc_clocksource_watchdog_disabled())
		clocksource_hpet.flags |= CLOCK_SOURCE_MUST_VERIFY;
	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register(&hpet_base.channels[0]);
		hpet_base.channels[0].mode = HPET_MODE_LEGACY;
		if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC))
			hpet_base.channels[1].mode = HPET_MODE_LEGACY;
		return 1;
	}
	return 0;

out_nohpet:
	kfree(hpet_base.channels);
	hpet_base.channels = NULL;
	hpet_base.nr_channels = 0;
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * The late initialization runs after the PCI quirks have been invoked
 * which might have detected a system on which the HPET can be enforced.
 *
 * Also, the MSI machinery is not working yet when the HPET is initialized
 * early.
 *
 * If the HPET is enabled, then:
 *
 *	1) Reserve one channel for /dev/hpet if CONFIG_HPET=y
 *	2) Reserve up to num_possible_cpus() channels as per CPU clockevents
 *	3) Setup /dev/hpet if CONFIG_HPET=y
 *	4) Register hotplug callbacks when clockevents are available
 */
static __init int hpet_late_init(void)
{
	int ret;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	hpet_select_device_channel();
	hpet_select_clockevents();
	hpet_reserve_platform_timers();
	hpet_print_config();

	if (!hpet_base.nr_clockevents)
		return 0;

	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
				hpet_cpuhp_online, NULL);
	if (ret)
		return ret;
	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
				hpet_cpuhp_dead);
	if (ret)
		goto err_cpuhp;
	return 0;

err_cpuhp:
	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
	return ret;
}
fs_initcall(hpet_late_init);

/*
 * Restore the boot time HPET configuration with the enable bit cleared,
 * but keep the HPET running if the firmware had it enabled at boot.
 */
void hpet_disable(void)
{
	unsigned int i;
	u32 cfg;

	if (!is_hpet_capable() || !hpet_virt_address)
		return;

	/* Restore boot configuration with the enable bit cleared */
	cfg = hpet_base.boot_cfg;
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	/* Restore the channel boot configuration */
	for (i = 0; i < hpet_base.nr_channels; i++)
		hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i));

	/* If the HPET was enabled at boot time, reenable it */
	if (hpet_base.boot_cfg & HPET_CFG_ENABLE)
		hpet_writel(hpet_base.boot_cfg, HPET_CFG);
}

#ifdef CONFIG_HPET_EMULATE_RTC

/*
 * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 *
 * RTC has 3 kinds of interrupts:
 *
 * 1) Update Interrupt - generate an interrupt, every second, when the
 *    RTC clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2)
 *
 * (1) and (2) above are implemented using polling at a frequency of 64 Hz:
 * DEFAULT_RTC_INT_FREQ.
 *
 * The exact frequency is a tradeoff between accuracy and interrupt overhead.
 *
 * For (3), we use interrupts at 64 Hz, or the user specified periodic frequency,
 * if it's higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;		/* Emulated RTC_UIE/RTC_AIE/RTC_PIE bits */
static int hpet_prev_update_sec;		/* Last second seen, for update-irq edge detection */
static struct rtc_time hpet_alarm_time;		/* Time of day of the emulated alarm */
static unsigned long hpet_pie_count;		/* Periodic ticks since last PIE rollover */
static u32 hpet_t1_cmp;				/* Cached channel 1 comparator value */
static u32 hpet_default_delta;			/* Counter delta for DEFAULT_RTC_INT_FREQ */
static u32 hpet_pie_delta;			/* Counter delta for the user's PIE frequency */
static unsigned long hpet_pie_limit;		/* Nonzero when PIE freq is clamped to default */

static rtc_irq_handler irq_handler;

/*
 * Check that the HPET counter c1 is ahead of c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Channel 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for channel 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	/*
	 * Lazily compute the comparator delta for the default 64 Hz rate:
	 * mult/shift convert nanoseconds to channel ticks, and the extra
	 * DEFAULT_RTC_SHIFT (6) divides one second by 64.
	 */
	if (!hpet_default_delta) {
		struct clock_event_device *evt = &hpet_base.channels[0].evt;
		uint64_t clc;

		clc = (uint64_t) evt->mult * NSEC_PER_SEC;
		clc >>= evt->shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	/* Use the user requested periodic delta only in pure PIE mode */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * IRQs off to keep the counter read and the comparator write close
	 * together, minimizing the window in which the counter can pass
	 * the newly programmed comparator value.
	 */
	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	/* One shot mode (see comment above: periodic is not usable here) */
	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

/* Stop channel 1 by clearing its interrupt enable bit */
static void hpet_disable_rtc_channel(void)
{
	u32 cfg = hpet_readl(HPET_T1_CFG);

	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
1317 */ 1318 int hpet_mask_rtc_irq_bit(unsigned long bit_mask) 1319 { 1320 if (!is_hpet_enabled()) 1321 return 0; 1322 1323 hpet_rtc_flags &= ~bit_mask; 1324 if (unlikely(!hpet_rtc_flags)) 1325 hpet_disable_rtc_channel(); 1326 1327 return 1; 1328 } 1329 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); 1330 1331 int hpet_set_rtc_irq_bit(unsigned long bit_mask) 1332 { 1333 unsigned long oldbits = hpet_rtc_flags; 1334 1335 if (!is_hpet_enabled()) 1336 return 0; 1337 1338 hpet_rtc_flags |= bit_mask; 1339 1340 if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) 1341 hpet_prev_update_sec = -1; 1342 1343 if (!oldbits) 1344 hpet_rtc_timer_init(); 1345 1346 return 1; 1347 } 1348 EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit); 1349 1350 int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec) 1351 { 1352 if (!is_hpet_enabled()) 1353 return 0; 1354 1355 hpet_alarm_time.tm_hour = hrs; 1356 hpet_alarm_time.tm_min = min; 1357 hpet_alarm_time.tm_sec = sec; 1358 1359 return 1; 1360 } 1361 EXPORT_SYMBOL_GPL(hpet_set_alarm_time); 1362 1363 int hpet_set_periodic_freq(unsigned long freq) 1364 { 1365 uint64_t clc; 1366 1367 if (!is_hpet_enabled()) 1368 return 0; 1369 1370 if (freq <= DEFAULT_RTC_INT_FREQ) { 1371 hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq; 1372 } else { 1373 struct clock_event_device *evt = &hpet_base.channels[0].evt; 1374 1375 clc = (uint64_t) evt->mult * NSEC_PER_SEC; 1376 do_div(clc, freq); 1377 clc >>= evt->shift; 1378 hpet_pie_delta = clc; 1379 hpet_pie_limit = 0; 1380 } 1381 1382 return 1; 1383 } 1384 EXPORT_SYMBOL_GPL(hpet_set_periodic_freq); 1385 1386 static void hpet_rtc_timer_reinit(void) 1387 { 1388 unsigned int delta; 1389 int lost_ints = -1; 1390 1391 if (unlikely(!hpet_rtc_flags)) 1392 hpet_disable_rtc_channel(); 1393 1394 if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) 1395 delta = hpet_default_delta; 1396 else 1397 delta = hpet_pie_delta; 1398 1399 /* 1400 * Increment the comparator value until we are ahead of the 1401 * current count. 
1402 */ 1403 do { 1404 hpet_t1_cmp += delta; 1405 hpet_writel(hpet_t1_cmp, HPET_T1_CMP); 1406 lost_ints++; 1407 } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER))); 1408 1409 if (lost_ints) { 1410 if (hpet_rtc_flags & RTC_PIE) 1411 hpet_pie_count += lost_ints; 1412 if (printk_ratelimit()) 1413 pr_warn("Lost %d RTC interrupts\n", lost_ints); 1414 } 1415 } 1416 1417 irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) 1418 { 1419 struct rtc_time curr_time; 1420 unsigned long rtc_int_flag = 0; 1421 1422 hpet_rtc_timer_reinit(); 1423 memset(&curr_time, 0, sizeof(struct rtc_time)); 1424 1425 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { 1426 if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) { 1427 pr_err_ratelimited("unable to read current time from RTC\n"); 1428 return IRQ_HANDLED; 1429 } 1430 } 1431 1432 if (hpet_rtc_flags & RTC_UIE && 1433 curr_time.tm_sec != hpet_prev_update_sec) { 1434 if (hpet_prev_update_sec >= 0) 1435 rtc_int_flag = RTC_UF; 1436 hpet_prev_update_sec = curr_time.tm_sec; 1437 } 1438 1439 if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) { 1440 rtc_int_flag |= RTC_PF; 1441 hpet_pie_count = 0; 1442 } 1443 1444 if (hpet_rtc_flags & RTC_AIE && 1445 (curr_time.tm_sec == hpet_alarm_time.tm_sec) && 1446 (curr_time.tm_min == hpet_alarm_time.tm_min) && 1447 (curr_time.tm_hour == hpet_alarm_time.tm_hour)) 1448 rtc_int_flag |= RTC_AF; 1449 1450 if (rtc_int_flag) { 1451 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); 1452 if (irq_handler) 1453 irq_handler(rtc_int_flag, dev_id); 1454 } 1455 return IRQ_HANDLED; 1456 } 1457 EXPORT_SYMBOL_GPL(hpet_rtc_interrupt); 1458 #endif 1459