/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/slab.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
 */
#define HPET_USER_FREQ	(64)
#define HPET_DRIFT	(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */
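/*
 * Illustrative userspace usage (a sketch, not part of the driver): a
 * process opens /dev/hpet read-only, claims one free comparator, sets
 * an interrupt frequency in Hz (capped by the dev.hpet.max-user-freq
 * sysctl unless it has CAP_SYS_RESOURCE), enables interrupts, and then
 * blocks in read() for the accumulated interrupt count:
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hits;
 *
 *	ioctl(fd, HPET_IRQFREQ, 64UL);
 *	ioctl(fd, HPET_IE_ON, 0);
 *	read(fd, &hits, sizeof(hits));
 */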
/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define write_counter(V, MC)	writeq(V, MC)
#define read_counter(MC)	readq(MC)
#else
#define write_counter(V, MC)	writel(V, MC)
#define read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;

static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}

static struct clocksource clocksource_hpet = {
	.name	= "hpet",
	.rating	= 250,
	.read	= read_hpet,
	.mask	= CLOCKSOURCE_MASK(64),
	.mult	= 0,		/* to be calculated */
	.shift	= 10,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define HPET_DEV_NAME	(7)

struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct clocksource *hp_clocksource;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define HPET_OPEN		0x0001
#define HPET_IE			0x0002	/* interrupt enabled */
#define HPET_PERIODIC		0x0004
#define HPET_SHARED_IRQ		0x0008


#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif

static irqreturn_t hpet_interrupt(int irq, void *data)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_timer->hpet_compare);
		write_counter(t + m, &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}
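/*
 * Pick a hardware IRQ for a comparator that does not have one yet:
 * prefer level-triggered mode, then walk the timer's interrupt routing
 * capability mask and take the first line that ACPI will register as a
 * GSI, recording the result in hd_hdwirq.
 */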
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
	unsigned long v;
	int irq, gsi;
	struct hpet_timer __iomem *timer;

	spin_lock_irq(&hpet_lock);
	if (devp->hd_hdwirq) {
		spin_unlock_irq(&hpet_lock);
		return;
	}

	timer = devp->hd_timer;

	/* we prefer level triggered mode */
	v = readl(&timer->hpet_config);
	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
		v |= Tn_INT_TYPE_CNF_MASK;
		writel(v, &timer->hpet_config);
	}
	spin_unlock_irq(&hpet_lock);

	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
				 Tn_INT_ROUTE_CAP_SHIFT;

	/*
	 * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15, which are always
	 * used by legacy devices.  In IO APIC mode, we skip all the
	 * legacy IRQs.
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		v &= ~0xf3df;
	else
		v &= ~0xffff;

	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
		if (irq >= nr_irqs) {
			irq = HPET_MAX_IRQ;
			break;
		}

		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
					ACPI_ACTIVE_LOW);
		if (gsi > 0)
			break;

		/* FIXME: Setup interrupt source table */
	}

	if (irq < HPET_MAX_IRQ) {
		spin_lock_irq(&hpet_lock);
		v = readl(&timer->hpet_config);
		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
		writel(v, &timer->hpet_config);
		devp->hd_hdwirq = gsi;
		spin_unlock_irq(&hpet_lock);
	}
	return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	mutex_lock(&hpet_mutex);
	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp;
	     hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
				continue;
			else {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		mutex_unlock(&hpet_mutex);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);
	mutex_unlock(&hpet_mutex);

	hpet_timer_set_irq(devp);

	return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table *wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return POLLIN | POLLRDNORM;

	return 0;
}
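/*
 * Map the HPET register block into userspace: exactly one page at
 * offset zero, uncached, and only if CONFIG_HPET_MMAP is enabled and
 * the block is page aligned.
 */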
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_HPET_MMAP
	struct hpet_dev *devp;
	unsigned long addr;

	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
		return -EINVAL;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
			__func__);
		return -EAGAIN;
	}

	return 0;
#else
	return -ENOSYS;
#endif
}

static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp;

	devp = file->private_data;

	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
		return 0;
	else
		return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	if (irq)
		free_irq(irq, devp);

	file->private_data = NULL;
	return 0;
}
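/*
 * HPET_IE_ON: request the comparator's routed IRQ, program the first
 * expiry (and, for periodic mode, the period) relative to the current
 * main counter value, then enable the interrupt.  The comparator is
 * forced into 32-bit mode because the ioctl interface does not expose
 * 64-bit comparators.
 */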
static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	if (!devp->hd_ireqfreq)
		return -EIO;

	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;

	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
		devp->hd_flags |= HPET_SHARED_IRQ;
	spin_unlock_irq(&hpet_lock);

	irq = devp->hd_hdwirq;

	if (irq) {
		unsigned long irq_flags;

		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
						? IRQF_SHARED : IRQF_DISABLED;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);

	/* 64-bit comparators are not yet supported through the ioctls,
	 * so force this into 32-bit mode if it supports both modes
	 */
	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);

		/*
		 * NOTE: First we modify the hidden accumulator
		 * register supported by periodic-capable comparators.
		 * We never want to modify the (single) counter; that
		 * would affect all the comparators. The value written
		 * is the counter value when the first interrupt is due.
		 */
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
		/*
		 * Then we modify the comparator, indicating the period
		 * for subsequent interrupts.
		 */
		write_counter(t, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}
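/*
 * For example, assuming a typical 14.318 MHz HPET, a requested rate of
 * 64 Hz becomes roughly 14318180 / 64 ~= 223722 main counter ticks per
 * interrupt; adding half the divisor first rounds to the nearest tick.
 */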
/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	m = hpets->hp_tick_freq + (dis >> 1);
	do_div(m, dis);
	return (unsigned long)m;
}

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
		  struct hpet_info *info)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			if (devp->hd_ireqfreq)
				info->hi_ireqfreq =
					hpet_time_div(hpetp, devp->hd_ireqfreq);
			else
				info->hi_ireqfreq = 0;
			info->hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info->hi_hpet = hpetp->hp_which;
			info->hi_timer = devp - hpetp->hp_dev;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if ((arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err &&
	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
		err = -EFAULT;

	return err;
}

#ifdef CONFIG_COMPAT
struct compat_hpet_info {
	compat_ulong_t hi_ireqfreq;	/* Hz */
	compat_ulong_t hi_flags;	/* information */
	unsigned short hi_hpet;
	unsigned short hi_timer;
};

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err) {
		struct compat_hpet_info __user *u = compat_ptr(arg);
		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
		    put_user(info.hi_flags, &u->hi_flags) ||
		    put_user(info.hi_hpet, &u->hi_hpet) ||
		    put_user(info.hi_timer, &u->hi_timer))
			err = -EFAULT;
	}

	return err;
}
#endif

static const struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpet_compat_ioctl,
#endif
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};

static int hpet_is_known(struct hpet_data *hdp)
{
	struct hpets *hpetp;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
			return 1;

	return 0;
}

static ctl_table hpet_table[] = {
	{
		.procname = "max-user-freq",
		.data = &hpet_max_freq,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{}
};

static ctl_table hpet_root[] = {
	{
		.procname = "hpet",
		.maxlen = 0,
		.mode = 0555,
		.child = hpet_table,
	},
	{}
};

static ctl_table dev_root[] = {
	{
		.procname = "dev",
		.maxlen = 0,
		.mode = 0555,
		.child = hpet_root,
	},
	{}
};

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment used when arming a timer with initial conditions; that is,
 * it accounts for the main counter ticks that expire before the
 * interrupt is enabled.
 */
#define TICK_CALIBRATE	(1000UL)

static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	unsigned long ret = -1;
	unsigned long tmp;

	/*
	 * Try to calibrate until the return value becomes a stable,
	 * small value.  If an SMI interrupts the calibration loop,
	 * the result will be large; repeating until the value stops
	 * decreasing avoids that impact.
	 */
	for ( ; ; ) {
		tmp = __hpet_calibrate(hpetp);
		if (ret <= tmp)
			break;
		ret = tmp;
	}

	return ret;
}
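/*
 * Register one HPET block described by hdp: record its comparators,
 * start the main counter if firmware left it disabled, calibrate
 * hp_delta, and (on ia64) register the main counter as a clocksource.
 */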
int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last = NULL;
	unsigned long period;
	unsigned long long temp;
	u32 remainder;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet that
	 * ACPI has also reported, then we catch it here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__func__);
		return 0;
	}

	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kzalloc(siz, GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */
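	/*
	 * For example, with the common 14.318 MHz HPET the capability
	 * register reports a period of about 69841279 fs, so the
	 * division above yields hp_tick_freq ~= 14318180 ticks/sec.
	 */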
	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk("\n");

	temp = hpetp->hp_tick_freq;
	remainder = do_div(temp, 1000000);
	printk(KERN_INFO
		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
		hpetp->hp_which, hpetp->hp_ntimer,
		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
		(unsigned) temp, remainder);

	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * then make timer unavailable for opens.
		 */
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
	if (!hpet_clocksource) {
		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
		CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
		clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq,
						clocksource_hpet.shift);
		clocksource_register(&clocksource_hpet);
		hpetp->hp_clocksource = &clocksource_hpet;
		hpet_clocksource = &clocksource_hpet;
	}
#endif

	return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		hdp->hd_phys_address = addr.minimum;
		hdp->hd_address = ioremap(addr.minimum, addr.address_length);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32;

		fixmem32 = &res->data.fixed_memory32;
		if (!fixmem32)
			return AE_NO_MEMORY;

		hdp->hd_phys_address = fixmem32->address;
		hdp->hd_address = ioremap(fixmem32->address,
						HPET_RANGE_SIZE);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *irqp;
		int i, irq;

		irqp = &res->data.extended_irq;

		for (i = 0; i < irqp->interrupt_count; i++) {
			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
				      irqp->triggering, irqp->polarity);
			if (irq < 0)
				return AE_ERROR;

			hdp->hd_irq[hdp->hd_nirqs] = irq;
			hdp->hd_nirqs++;
		}
	}

	return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		printk("%s: no address or irqs in _CRS\n", __func__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}

static int hpet_acpi_remove(struct acpi_device *device, int type)
{
	/* XXX need to unregister clocksource, dealloc mem, etc */
	return -EINVAL;
}

static const struct acpi_device_id hpet_device_ids[] = {
	{"PNP0103", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, hpet_device_ids);

static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = hpet_device_ids,
	.ops = {
		.add = hpet_acpi_add,
		.remove = hpet_acpi_remove,
	},
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
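/*
 * Module init order: expose /dev/hpet as a misc device, publish the
 * dev.hpet.max-user-freq sysctl, then register the ACPI driver so that
 * HPET blocks found via _CRS are handed to hpet_alloc().
 */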
static int __init hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}

static void __exit hpet_exit(void)
{
	acpi_bus_unregister_driver(&hpet_acpi_driver);

	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);

	return;
}

module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");