/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec.htm
 */
#define	HPET_USER_FREQ	(64)
#define	HPET_DRIFT	(500)

static u32 hpet_ntimer, hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_task_lock);

#define	HPET_DEV_NAME	(7)

struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	struct hpet_task *hd_task;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct time_interpolator *hp_interpolator;
	unsigned long hp_period;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define	HPET_OPEN		0x0001
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004

#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif

static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_hpet->hpet_mc);
		write_counter(t + m + devp->hd_hpets->hp_delta,
			      &devp->hd_timer->hpet_compare);
	}

	isr = (1 << (devp - devp->hd_hpets->hp_dev));
	writeq(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	spin_lock(&hpet_task_lock);
	if (devp->hd_task)
		devp->hd_task->ht_func(devp->hd_task->ht_data);
	spin_unlock(&hpet_task_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}

static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN
			    || hpetp->hp_dev[i].hd_task)
				continue;
			else {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);

	return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table * wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return POLLIN | POLLRDNORM;

	return 0;
}

static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_HPET_MMAP
	struct hpet_dev *devp;
	unsigned long addr;

	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
		return -EINVAL;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
			       PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "remap_pfn_range failed in hpet.c\n");
		return -EAGAIN;
	}

	return 0;
#else
	return -ENOSYS;
#endif
}

static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp;

	devp = file->private_data;

	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
		return 0;
	else
		return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	if (irq)
		free_irq(irq, devp);

	if (file->f_flags & FASYNC)
		hpet_fasync(-1, file, 0);

	file->private_data = NULL;
	return 0;
}

static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);

static int
hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	   unsigned long arg)
{
	struct hpet_dev *devp;

	devp = file->private_data;
	return hpet_ioctl_common(devp, cmd, arg, 0);
}

static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	v = readq(&timer->hpet_config);
	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;
	spin_unlock_irq(&hpet_lock);

	t = readq(&timer->hpet_config);
	irq = devp->hd_hdwirq;

	if (irq) {
		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));

		if (request_irq(irq, hpet_interrupt, SA_INTERRUPT,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);
	g = v | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		write_counter(t, &timer->hpet_compare);
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
		v |= Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	isr = (1 << (devp - hpetp->hp_dev));
	writeq(isr, &hpet->hpet_isr);
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}
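
/*
 * hpet_time_div() below converts between the HPET's femtosecond tick
 * period and a rate or tick count by dividing 10^15 by its argument.
 * A worked example, assuming a hypothetical 14.31818 MHz HPET (a
 * reported period of 69841279 fs) and a requested frequency of 64 Hz,
 * used here only for illustration:
 *
 *	counter frequency = 10^15 / 69841279        ~= 14318180 Hz
 *	hd_ireqfreq       = 10^15 / (69841279 * 64) ~= 223721 ticks
 *
 * so hpet_interrupt() and hpet_ioctl_ieon() above advance the comparator
 * by roughly 223721 main-counter ticks per requested interrupt.
 */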
static inline unsigned long hpet_time_div(unsigned long dis)
{
	unsigned long long m = 1000000000000000ULL;

	do_div(m, dis);

	return (unsigned long)m;
}

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			struct hpet_info info;

			info.hi_ireqfreq = hpet_time_div(hpetp->hp_period *
							 devp->hd_ireqfreq);
			info.hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info.hi_hpet = devp->hd_hpets->hp_which;
			info.hi_timer = devp - devp->hd_hpets->hp_dev;
			if (copy_to_user((void __user *)arg, &info, sizeof(info)))
				err = -EFAULT;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if (!kernel && (arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (arg & (arg - 1)) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp->hp_period * arg);
	}

	return err;
}

static struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.ioctl = hpet_ioctl,
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};

EXPORT_SYMBOL(hpet_alloc);
EXPORT_SYMBOL(hpet_register);
EXPORT_SYMBOL(hpet_unregister);
EXPORT_SYMBOL(hpet_control);
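
/*
 * In-kernel clients claim a timer with hpet_register() and drive it with
 * hpet_control(), which reaches hpet_ioctl_common() with the kernel flag
 * set.  A minimal sketch of such a client (the callback, its frequency
 * and the lack of error handling are hypothetical, for illustration only):
 *
 *	static void example_tick(void *data)
 *	{
 *		// called from hpet_interrupt() under hpet_task_lock
 *	}
 *
 *	static struct hpet_task example_task = {
 *		.ht_func = example_tick,
 *		.ht_data = NULL,
 *	};
 *
 *	if (!hpet_register(&example_task, 1)) {
 *		hpet_control(&example_task, HPET_IRQFREQ, 64);
 *		hpet_control(&example_task, HPET_EPI, 0);
 *		hpet_control(&example_task, HPET_IE_ON, 0);
 *	}
 *	...
 *	hpet_unregister(&example_task);
 */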
int hpet_register(struct hpet_task *tp, int periodic)
{
	unsigned int i;
	u64 mask;
	struct hpet_timer __iomem *timer;
	struct hpet_dev *devp;
	struct hpets *hpetp;

	switch (periodic) {
	case 1:
		mask = Tn_PER_INT_CAP_MASK;
		break;
	case 0:
		mask = 0;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irq(&hpet_task_lock);
	spin_lock(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
		     i < hpetp->hp_ntimer; i++, timer++) {
			if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
			    != mask)
				continue;

			devp = &hpetp->hp_dev[i];

			if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
				devp = NULL;
				continue;
			}

			tp->ht_opaque = devp;
			devp->hd_task = tp;
			break;
		}

	spin_unlock(&hpet_lock);
	spin_unlock_irq(&hpet_task_lock);

	if (tp->ht_opaque)
		return 0;
	else
		return -EBUSY;
}

static inline int hpet_tpcheck(struct hpet_task *tp)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;

	devp = tp->ht_opaque;

	if (!devp)
		return -ENXIO;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (devp >= hpetp->hp_dev
		    && devp < (hpetp->hp_dev + hpetp->hp_ntimer)
		    && devp->hd_hpet == hpetp->hp_hpet)
			return 0;

	return -ENXIO;
}

int hpet_unregister(struct hpet_task *tp)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int err;

	if ((err = hpet_tpcheck(tp)))
		return err;

	spin_lock_irq(&hpet_task_lock);
	spin_lock(&hpet_lock);

	devp = tp->ht_opaque;
	if (devp->hd_task != tp) {
		spin_unlock(&hpet_lock);
		spin_unlock_irq(&hpet_task_lock);
		return -ENXIO;
	}

	timer = devp->hd_timer;
	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);
	devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC);
	devp->hd_task = NULL;
	spin_unlock(&hpet_lock);
	spin_unlock_irq(&hpet_task_lock);

	return 0;
}

int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
{
	struct hpet_dev *devp;
	int err;

	if ((err = hpet_tpcheck(tp)))
		return err;

	spin_lock_irq(&hpet_lock);
	devp = tp->ht_opaque;
	if (devp->hd_task != tp) {
		spin_unlock_irq(&hpet_lock);
		return -ENXIO;
	}
	spin_unlock_irq(&hpet_lock);
	return hpet_ioctl_common(devp, cmd, arg, 1);
}

static ctl_table hpet_table[] = {
	{
	 .ctl_name = 1,
	 .procname = "max-user-freq",
	 .data = &hpet_max_freq,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec,
	 },
	{.ctl_name = 0}
};

static ctl_table hpet_root[] = {
	{
	 .ctl_name = 1,
	 .procname = "hpet",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_table,
	 },
	{.ctl_name = 0}
};

static ctl_table dev_root[] = {
	{
	 .ctl_name = CTL_DEV,
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_root,
	 },
	{.ctl_name = 0}
};

static struct ctl_table_header *sysctl_header;
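
/*
 * The tables above surface hpet_max_freq as /proc/sys/dev/hpet/max-user-freq;
 * raising that value (root only) raises the interrupt frequency an
 * unprivileged HPET_IRQFREQ caller may request.
 */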
static void hpet_register_interpolator(struct hpets *hpetp)
{
#ifdef	CONFIG_TIME_INTERPOLATION
	struct time_interpolator *ti;

	ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	if (!ti)
		return;

	memset(ti, 0, sizeof(*ti));
	ti->source = TIME_SOURCE_MMIO64;
	ti->shift = 10;
	ti->addr = &hpetp->hp_hpet->hpet_mc;
	ti->frequency = hpet_time_div(hpetp->hp_period);
	ti->drift = HPET_DRIFT;
	ti->mask = -1;

	hpetp->hp_interpolator = ti;
	register_time_interpolator(ti);
#endif
}

/*
 * Adjustment for when arming the timer with initial conditions.
 * That is, the number of main counter ticks that expire before
 * interrupts are enabled.
 */
#define	TICK_CALIBRATE	(1000UL)

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp->hp_period * TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}

int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last = (struct hpets *)0;
	unsigned long ns;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet and
	 * ACPI also reports it, then we catch the duplicate here.
	 */
	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet == hdp->hd_address)
			return 0;

	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kmalloc(siz, GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	memset(hpetp, 0, siz);

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	hpetp->hp_period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
	    HPET_COUNTER_CLK_PERIOD_SHIFT;

	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
	       hpetp->hp_which, hdp->hd_phys_address,
	       hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk("\n");

	ns = hpetp->hp_period;	/* femtoseconds, 10^-15 */
	ns /= 1000000;		/* convert to nanoseconds, 10^-9 */
	printk(KERN_INFO "hpet%d: %ldns tick, %d %d-bit timers\n",
	       hpetp->hp_which, ns, hpetp->hp_ntimer,
	       cap & HPET_COUNTER_SIZE_MASK ? 64 : 32);

	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer;
	     i++, hpet_ntimer++, devp++) {
		unsigned long v;
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
		v = readq(&timer->hpet_config);

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * then make timer unavailable for opens.
		 */
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);
	hpet_register_interpolator(hpetp);

	return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;
	struct hpets *hpetp;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		unsigned long size;

		size = addr.max_address_range - addr.min_address_range + 1;
		hdp->hd_phys_address = addr.min_address_range;
		hdp->hd_address = ioremap(addr.min_address_range, size);

		for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
			if (hpetp->hp_hpet == hdp->hd_address)
				return -EBUSY;
	} else if (res->id == ACPI_RSTYPE_EXT_IRQ) {
		struct acpi_resource_ext_irq *irqp;
		int i;

		irqp = &res->data.extended_irq;

		if (irqp->number_of_interrupts > 0) {
			hdp->hd_nirqs = irqp->number_of_interrupts;

			for (i = 0; i < hdp->hd_nirqs; i++) {
				int rc =
				    acpi_register_gsi(irqp->interrupts[i],
						      irqp->edge_level,
						      irqp->active_high_low);
				if (rc < 0)
					return AE_ERROR;
				hdp->hd_irq[i] = rc;
			}
		}
	}

	return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result =
	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		printk("%s: no address or irqs in _CRS\n", __FUNCTION__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}

static int hpet_acpi_remove(struct acpi_device *device, int type)
{
	/* XXX need to unregister interpolator, dealloc mem, etc */
	return -EINVAL;
}

static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = "PNP0103",
	.ops = {
		.add = hpet_acpi_add,
		.remove = hpet_acpi_remove,
		},
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root, 0);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}

static void __exit hpet_exit(void)
{
	acpi_bus_unregister_driver(&hpet_acpi_driver);

	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);

	return;
}

module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
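
/*
 * A minimal userspace sketch of the /dev/hpet interface exposed above
 * (illustrative only; the 32 Hz frequency and the lack of error handling
 * are placeholders):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hpet.h>
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hits;
 *
 *	ioctl(fd, HPET_IRQFREQ, 32);	// power-of-two Hz, <= max-user-freq
 *	ioctl(fd, HPET_IE_ON, 0);	// request the IRQ and arm the comparator
 *	read(fd, &hits, sizeof(hits));	// blocks; returns interrupts since last read
 *	ioctl(fd, HPET_IE_OFF, 0);
 *	close(fd);
 */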