#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/i8253.h>
#include <asm/hpet.h>

#define HPET_MASK	CLOCKSOURCE_MASK(32)
#define HPET_SHIFT	22

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC	1000000L

#define HPET_DEV_USED_BIT	2
#define HPET_DEV_USED		(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID		0x8
#define HPET_DEV_FSB_CAP	0x1000
#define HPET_DEV_PERI_CAP	0x2000

#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long			hpet_address;
u8				hpet_blockid; /* OS timer block num */
u8				hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned long		hpet_num_timers;
#endif
static void __iomem		*hpet_virt_address;

struct hpet_dev {
	struct clock_event_device	evt;
	unsigned int			num;
	int				cpu;
	unsigned int			irq;
	unsigned int			flags;
	char				name[10];
};

inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
static int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

static int __init hpet_setup(char *str)
{
	if (str) {
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;
	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);		\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_period;

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event	= hpet_legacy_next_event,
	.shift		= 32,
	.irq		= 0,
	.rating		= 50,
};

static void hpet_stop_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * The mult factor is defined as (include/linux/clockchips.h)
	 *  mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
	 * hpet_period is in units of femtoseconds (per cycle), so
	 *  mult/2^shift = cyc/ns = 10^6/hpet_period
	 *  mult = (10^6 * 2^shift)/hpet_period
	 *  mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
	 */
	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
				      hpet_period, hpet_clockevent.shift);
	/* Calculate the min / max delta */
	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &hpet_clockevent);
	/* 5 usec minimum reprogramming delta. */
	hpet_clockevent.min_delta_ns = 5000;

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(&hpet_clockevent);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
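
/*
 * Worked example (illustrative figures, not read from hardware): a
 * common HPET runs at 14.31818 MHz, i.e. hpet_period is roughly
 * 69841279 fs.  With shift = 32 the conversion factor computed above is
 *	mult = (10^6 << 32) / 69841279 ~= 61496115
 * which encodes about 0.0143 HPET cycles per nanosecond
 * (14.318 cycles per microsecond).
 */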

static int hpet_setup_msi_irq(unsigned int irq);

static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		/* Make sure we use edge triggered interrupts */
		cfg &= ~HPET_TN_LEVEL;
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}
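
/*
 * In the periodic case above, delta is the number of HPET cycles per
 * tick: delta = (NSEC_PER_SEC/HZ) * mult >> shift.  With the example
 * 14.31818 MHz HPET (mult ~= 61496115, shift = 32) and HZ=1000 this is
 * roughly 10^6 * 61496115 >> 32 ~= 14318 cycles per 1 ms period.
 * (Illustrative numbers only; the real values come from hpet_period.)
 */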

static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * We need to read back the CMP register on certain HPET
	 * implementations (ATI chipsets) which seem to delay the
	 * transfer of the compare register into the internal compare
	 * logic. With small deltas this might actually be too late as
	 * the counter could already be higher than the compare value
	 * at that point and we would wait for the next hpet interrupt
	 * forever. We found out that reading the CMP register back
	 * forces the transfer so we can rely on the comparison with
	 * the counter register below. If the read back from the
	 * compare register does not match the value we programmed
	 * then we might have a real hardware problem. We can not do
	 * much about it here, but at least alert the user/admin with
	 * a prominent warning.
	 */
	WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
		  KERN_WARNING "hpet: compare register read back failed.\n");

	return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
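
/*
 * The signed comparison in the return above is wraparound safe: if the
 * comparator was programmed to e.g. cnt = 0xfffffff0 and the counter
 * has already wrapped to 0x00000010, then (s32)(0x10 - 0xfffffff0) is
 * +0x20 >= 0, so the missed event is reported as -ETIME instead of
 * waiting for a full 32-bit counter cycle.
 */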

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev	*hpet_devs;

void hpet_msi_unmask(unsigned int irq)
{
	struct hpet_dev *hdev = get_irq_data(irq);
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(unsigned int irq)
{
	unsigned int cfg;
	struct hpet_dev *hdev = get_irq_data(irq);

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
	struct hpet_dev *hdev = get_irq_data(irq);

	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
{
	struct hpet_dev *hdev = get_irq_data(irq);

	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}

static void hpet_msi_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	return hpet_next_event(delta, evt, hdev->num);
}

static int hpet_setup_msi_irq(unsigned int irq)
{
	if (arch_setup_hpet_msi(irq, hpet_blockid)) {
		destroy_irq(irq);
		return -EINVAL;
	}
	return 0;
}

static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq;

	irq = create_irq();
	if (!irq)
		return -EINVAL;

	set_irq_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
		       dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
	       dev->name, dev->irq);

	return 0;
}

/* This should be called in specific @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;
	uint64_t hpet_freq;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->shift = 32;

	/*
	 * The period is a femto seconds value. We need to calculate the
	 * scaled math multiplication factor for nanosecond to hpet tick
	 * conversion.
	 */
	hpet_freq = 1000000000000000ULL;
	do_div(hpet_freq, hpet_period);
	evt->mult = div_sc((unsigned long) hpet_freq,
			   NSEC_PER_SEC, evt->shift);
	/* Calculate the max delta */
	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
	/* 5 usec minimum reprogramming delta. */
	evt->min_delta_ns = 5000;

	evt->cpumask = cpumask_of(hdev->cpu);
	clockevents_register_device(evt);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;
	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
	       num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
				     (unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};
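
/*
 * hpet_work() is scheduled with schedule_delayed_work_on() from the CPU
 * hotplug notifier below, so it runs on the CPU that is coming online.
 * That is why init_one_hpet_msi_clockevent() can register the per-cpu
 * clockevent directly and warns if it ever runs on the wrong CPU.
 */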

static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_timer_on_stack(&work.work.timer);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else

static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif

/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.shift		= HPET_SHIFT,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
#ifdef CONFIG_X86_64
	.vread		= vread_hpet,
#endif
};

static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	/*
	 * The definition of mult is (include/linux/clocksource.h)
	 * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
	 * so we first need to convert hpet_period to ns/cyc units:
	 *  mult/2^shift = ns/cyc = hpet_period/10^6
	 *  mult = (hpet_period * 2^shift)/10^6
	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
	 */
	clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);

	clocksource_register(&clocksource_hpet);

	return 0;
}
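
/*
 * Worked example for the clocksource direction (illustrative values):
 * with hpet_period ~= 69841279 fs (14.31818 MHz) and HPET_SHIFT = 22,
 *	mult = (69841279 << 22) / 10^6 ~= 292935556
 * i.e. mult/2^22 ~= 69.84 ns per HPET cycle, the inverse of the
 * clockevent factor computed in hpet_legacy_clockevent_register().
 */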

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	unsigned int id;
	int i;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kmalloc !
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary !
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);

void hpet_disable(void)
{
	if (is_hpet_capable()) {
		unsigned int cfg = hpet_readl(HPET_CFG);

		if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);
	}
}

#ifdef CONFIG_HPET_EMULATE_RTC

/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1
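
/*
 * With DEFAULT_RTC_SHIFT = 6 the emulated timer is programmed to fire
 * 2^6 = 64 times per second.  For the illustrative 14.31818 MHz HPET
 * that is a comparator delta of about 14318180 / 64 ~= 223722 counts
 * (see hpet_default_delta below).
 */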

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags)) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
			       lost_ints);
	}
}
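
/*
 * lost_ints starts at -1 above so that the normal case, where a single
 * increment of the comparator gets ahead of the counter again, counts
 * as zero lost interrupts; only additional loop passes indicate RTC
 * ticks that were actually missed and get accounted and reported.
 */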

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif