// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * See HPET spec revision 1.
 */
#define	HPET_USER_FREQ	(64)
#define	HPET_DRIFT	(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define	HPET_DEV_NAME	(7)

struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct clocksource *hp_clocksource;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[] __counted_by(hp_ntimer);
};

static struct hpets *hpets;

#define	HPET_OPEN		0x0001
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004
#define	HPET_SHARED_IRQ		0x0008

static irqreturn_t hpet_interrupt(int irq, void *data)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
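	 * The comparator is advanced below by a whole number of periods
	 * (k + 1), so that even a badly delayed interrupt still programs
	 * a deadline that lies in the future.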
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long t, mc, base, k;
		struct hpet __iomem *hpet = devp->hd_hpet;
		struct hpets *hpetp = devp->hd_hpets;

		t = devp->hd_ireqfreq;
		read_counter(&devp->hd_timer->hpet_compare);
		mc = read_counter(&hpet->hpet_mc);
		/* The time for the next interrupt would logically be t + m,
		 * however, if we are very unlucky and the interrupt is delayed
		 * for longer than t then we will completely miss the next
		 * interrupt if we set t + m and an application will hang.
		 * Therefore we need to make a more complex computation assuming
		 * that there exists a k for which the following is true:
		 * k * t + base < mc + delta
		 * (k + 1) * t + base > mc + delta
		 * where t is the interval in hpet ticks for the given freq,
		 * base is the theoretical start value 0 < base < t,
		 * mc is the main counter value at the time of the interrupt,
		 * delta is the time it takes to write a value to the
		 * comparator.
		 * k may then be computed as (mc - base + delta) / t .
		 */
		base = mc % t;
		k = (mc - base + hpetp->hp_delta) / t;
		write_counter(t * (k + 1) + base,
			      &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}

static void hpet_timer_set_irq(struct hpet_dev *devp)
{
	unsigned long v;
	int irq, gsi;
	struct hpet_timer __iomem *timer;

	spin_lock_irq(&hpet_lock);
	if (devp->hd_hdwirq) {
		spin_unlock_irq(&hpet_lock);
		return;
	}

	timer = devp->hd_timer;

	/* we prefer level triggered mode */
	v = readl(&timer->hpet_config);
	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
		v |= Tn_INT_TYPE_CNF_MASK;
		writel(v, &timer->hpet_config);
	}
	spin_unlock_irq(&hpet_lock);

	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
				 Tn_INT_ROUTE_CAP_SHIFT;

	/*
	 * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15, which are always used
	 * by legacy devices. In IO APIC mode, we skip all the legacy IRQs.
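	 * (With the PIC-mode mask, ~0xf3df clears IRQ0-4, IRQ6-9 and
	 * IRQ12-15, leaving IRQ5, IRQ10 and IRQ11 selectable; ~0xffff
	 * clears IRQ0-15 altogether.)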
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		v &= ~0xf3df;
	else
		v &= ~0xffff;

	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
		if (irq >= nr_irqs) {
			irq = HPET_MAX_IRQ;
			break;
		}

		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
					ACPI_ACTIVE_LOW);
		if (gsi > 0)
			break;

		/* FIXME: Setup interrupt source table */
	}

	if (irq < HPET_MAX_IRQ) {
		spin_lock_irq(&hpet_lock);
		v = readl(&timer->hpet_config);
		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
		writel(v, &timer->hpet_config);
		devp->hd_hdwirq = gsi;
		spin_unlock_irq(&hpet_lock);
	}
	return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	mutex_lock(&hpet_mutex);
	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
				continue;
			} else {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		mutex_unlock(&hpet_mutex);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);
	mutex_unlock(&hpet_mutex);

	hpet_timer_set_irq(devp);

	return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data) {
			break;
		} else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}

static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

#ifdef CONFIG_HPET_MMAP
#ifdef CONFIG_HPET_MMAP_DEFAULT
static int hpet_mmap_enabled = 1;
#else
static int hpet_mmap_enabled = 0;
#endif

static __init int hpet_mmap_enable(char *str)
{
	get_option(&str, &hpet_mmap_enabled);
	pr_info("HPET mmap %s\n", hpet_mmap_enabled ?
"enabled" : "disabled"); 341 return 1; 342 } 343 __setup("hpet_mmap=", hpet_mmap_enable); 344 345 static int hpet_mmap(struct file *file, struct vm_area_struct *vma) 346 { 347 struct hpet_dev *devp; 348 unsigned long addr; 349 350 if (!hpet_mmap_enabled) 351 return -EACCES; 352 353 devp = file->private_data; 354 addr = devp->hd_hpets->hp_hpet_phys; 355 356 if (addr & (PAGE_SIZE - 1)) 357 return -ENOSYS; 358 359 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 360 return vm_iomap_memory(vma, addr, PAGE_SIZE); 361 } 362 #else 363 static int hpet_mmap(struct file *file, struct vm_area_struct *vma) 364 { 365 return -ENOSYS; 366 } 367 #endif 368 369 static int hpet_fasync(int fd, struct file *file, int on) 370 { 371 struct hpet_dev *devp; 372 373 devp = file->private_data; 374 375 if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0) 376 return 0; 377 else 378 return -EIO; 379 } 380 381 static int hpet_release(struct inode *inode, struct file *file) 382 { 383 struct hpet_dev *devp; 384 struct hpet_timer __iomem *timer; 385 int irq = 0; 386 387 devp = file->private_data; 388 timer = devp->hd_timer; 389 390 spin_lock_irq(&hpet_lock); 391 392 writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), 393 &timer->hpet_config); 394 395 irq = devp->hd_irq; 396 devp->hd_irq = 0; 397 398 devp->hd_ireqfreq = 0; 399 400 if (devp->hd_flags & HPET_PERIODIC 401 && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { 402 unsigned long v; 403 404 v = readq(&timer->hpet_config); 405 v ^= Tn_TYPE_CNF_MASK; 406 writeq(v, &timer->hpet_config); 407 } 408 409 devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC); 410 spin_unlock_irq(&hpet_lock); 411 412 if (irq) 413 free_irq(irq, devp); 414 415 file->private_data = NULL; 416 return 0; 417 } 418 419 static int hpet_ioctl_ieon(struct hpet_dev *devp) 420 { 421 struct hpet_timer __iomem *timer; 422 struct hpet __iomem *hpet; 423 struct hpets *hpetp; 424 int irq; 425 unsigned long g, v, t, m; 426 unsigned long flags, isr; 427 428 timer = devp->hd_timer; 429 hpet = devp->hd_hpet; 430 hpetp = devp->hd_hpets; 431 432 if (!devp->hd_ireqfreq) 433 return -EIO; 434 435 spin_lock_irq(&hpet_lock); 436 437 if (devp->hd_flags & HPET_IE) { 438 spin_unlock_irq(&hpet_lock); 439 return -EBUSY; 440 } 441 442 devp->hd_flags |= HPET_IE; 443 444 if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK) 445 devp->hd_flags |= HPET_SHARED_IRQ; 446 spin_unlock_irq(&hpet_lock); 447 448 irq = devp->hd_hdwirq; 449 450 if (irq) { 451 unsigned long irq_flags; 452 453 if (devp->hd_flags & HPET_SHARED_IRQ) { 454 /* 455 * To prevent the interrupt handler from seeing an 456 * unwanted interrupt status bit, program the timer 457 * so that it will not fire in the near future ... 458 */ 459 writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK, 460 &timer->hpet_config); 461 write_counter(read_counter(&hpet->hpet_mc), 462 &timer->hpet_compare); 463 /* ... and clear any left-over status. */ 464 isr = 1 << (devp - devp->hd_hpets->hp_dev); 465 writel(isr, &hpet->hpet_isr); 466 } 467 468 sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev)); 469 irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? 
						IRQF_SHARED : 0;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);

	/* 64-bit comparators are not yet supported through the ioctls,
	 * so force this into 32-bit mode if it supports both modes
	 */
	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);

		/*
		 * NOTE: First we modify the hidden accumulator
		 * register supported by periodic-capable comparators.
		 * We never want to modify the (single) counter; that
		 * would affect all the comparators. The value written
		 * is the counter value when the first interrupt is due.
		 */
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
		/*
		 * Then we modify the comparator, indicating the period
		 * for subsequent interrupt.
		 */
		write_counter(t, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}

/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	m = hpets->hp_tick_freq + (dis >> 1);
	return div64_ul(m, dis);
}

static int
hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
		  struct hpet_info *info)
{
	struct hpet_timer __iomem *timer;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			memset(info, 0, sizeof(*info));
			if (devp->hd_ireqfreq)
				info->hi_ireqfreq =
					hpet_time_div(hpetp, devp->hd_ireqfreq);
			info->hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info->hi_hpet = hpetp->hp_which;
			info->hi_timer = devp - hpetp->hp_dev;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if ((arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err &&
	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
		err = -EFAULT;

	return err;
}

#ifdef CONFIG_COMPAT
struct compat_hpet_info {
	compat_ulong_t hi_ireqfreq;	/* Hz */
	compat_ulong_t hi_flags;	/* information */
	unsigned short hi_hpet;
	unsigned short hi_timer;
};

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err) {
		struct compat_hpet_info __user *u = compat_ptr(arg);
		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
		    put_user(info.hi_flags, &u->hi_flags) ||
		    put_user(info.hi_hpet, &u->hi_hpet) ||
		    put_user(info.hi_timer, &u->hi_timer))
			err = -EFAULT;
	}

	return err;
}
#endif

static const struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpet_compat_ioctl,
#endif
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};

static int hpet_is_known(struct hpet_data *hdp)
{
	struct hpets *hpetp;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
			return 1;

	return 0;
}

static struct ctl_table hpet_table[] = {
	{
	 .procname = "max-user-freq",
	 .data = &hpet_max_freq,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec,
	},
};

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
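 * hp_delta, measured by hpet_calibrate() below, is added to every
 * comparator write so that the programmed deadline cannot already
 * have passed by the time the write lands.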
 */
#define	TICK_CALIBRATE	(1000UL)

static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	unsigned long ret = ~0UL;
	unsigned long tmp;

	/*
	 * Try to calibrate until the return value becomes a stable, small
	 * value. If an SMI occurs during a calibration loop, the return
	 * value will be large. This avoids its impact.
	 */
	for ( ; ; ) {
		tmp = __hpet_calibrate(hpetp);
		if (ret <= tmp)
			break;
		ret = tmp;
	}

	return ret;
}

int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	struct hpet __iomem *hpet;
	static struct hpets *last;
	unsigned long period;
	unsigned long long temp;
	u32 remainder;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet that
	 * ACPI has also reported, then we catch it here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__func__);
		return 0;
	}

	hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
			GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */

	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk(KERN_CONT "%s %d", i > 0 ?
"," : "", hdp->hd_irq[i]); 850 printk(KERN_CONT "\n"); 851 852 temp = hpetp->hp_tick_freq; 853 remainder = do_div(temp, 1000000); 854 printk(KERN_INFO 855 "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n", 856 hpetp->hp_which, hpetp->hp_ntimer, 857 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, 858 (unsigned) temp, remainder); 859 860 mcfg = readq(&hpet->hpet_config); 861 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { 862 write_counter(0L, &hpet->hpet_mc); 863 mcfg |= HPET_ENABLE_CNF_MASK; 864 writeq(mcfg, &hpet->hpet_config); 865 } 866 867 for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) { 868 struct hpet_timer __iomem *timer; 869 870 timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; 871 872 devp->hd_hpets = hpetp; 873 devp->hd_hpet = hpet; 874 devp->hd_timer = timer; 875 876 /* 877 * If the timer was reserved by platform code, 878 * then make timer unavailable for opens. 879 */ 880 if (hdp->hd_state & (1 << i)) { 881 devp->hd_flags = HPET_OPEN; 882 continue; 883 } 884 885 init_waitqueue_head(&devp->hd_waitqueue); 886 } 887 888 hpetp->hp_delta = hpet_calibrate(hpetp); 889 890 return 0; 891 } 892 893 static acpi_status hpet_resources(struct acpi_resource *res, void *data) 894 { 895 struct hpet_data *hdp; 896 acpi_status status; 897 struct acpi_resource_address64 addr; 898 899 hdp = data; 900 901 status = acpi_resource_to_address64(res, &addr); 902 903 if (ACPI_SUCCESS(status)) { 904 hdp->hd_phys_address = addr.address.minimum; 905 hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length); 906 if (!hdp->hd_address) 907 return AE_ERROR; 908 909 if (hpet_is_known(hdp)) { 910 iounmap(hdp->hd_address); 911 return AE_ALREADY_EXISTS; 912 } 913 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { 914 struct acpi_resource_fixed_memory32 *fixmem32; 915 916 fixmem32 = &res->data.fixed_memory32; 917 918 hdp->hd_phys_address = fixmem32->address; 919 hdp->hd_address = ioremap(fixmem32->address, 920 HPET_RANGE_SIZE); 921 if (!hdp->hd_address) 922 return AE_ERROR; 923 924 if (hpet_is_known(hdp)) { 925 iounmap(hdp->hd_address); 926 return AE_ALREADY_EXISTS; 927 } 928 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { 929 struct acpi_resource_extended_irq *irqp; 930 int i, irq; 931 932 irqp = &res->data.extended_irq; 933 934 for (i = 0; i < irqp->interrupt_count; i++) { 935 if (hdp->hd_nirqs >= HPET_MAX_TIMERS) 936 break; 937 938 irq = acpi_register_gsi(NULL, irqp->interrupts[i], 939 irqp->triggering, 940 irqp->polarity); 941 if (irq < 0) 942 return AE_ERROR; 943 944 hdp->hd_irq[hdp->hd_nirqs] = irq; 945 hdp->hd_nirqs++; 946 } 947 } 948 949 return AE_OK; 950 } 951 952 static int hpet_acpi_add(struct acpi_device *device) 953 { 954 acpi_status result; 955 struct hpet_data data; 956 957 memset(&data, 0, sizeof(data)); 958 959 result = 960 acpi_walk_resources(device->handle, METHOD_NAME__CRS, 961 hpet_resources, &data); 962 963 if (ACPI_FAILURE(result)) 964 return -ENODEV; 965 966 if (!data.hd_address || !data.hd_nirqs) { 967 if (data.hd_address) 968 iounmap(data.hd_address); 969 printk("%s: no address or irqs in _CRS\n", __func__); 970 return -ENODEV; 971 } 972 973 return hpet_alloc(&data); 974 } 975 976 static const struct acpi_device_id hpet_device_ids[] = { 977 {"PNP0103", 0}, 978 {"", 0}, 979 }; 980 981 static struct acpi_driver hpet_acpi_driver = { 982 .name = "hpet", 983 .ids = hpet_device_ids, 984 .ops = { 985 .add = hpet_acpi_add, 986 }, 987 }; 988 989 static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; 990 991 static int __init 
hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl("dev/hpet", hpet_table);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}
device_initcall(hpet_init);

/*
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
*/
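
/*
 * Illustrative userspace usage of the /dev/hpet interface exposed above
 * (a sketch only, not part of the driver; error handling is omitted and
 * the requested 64 Hz frequency is just an example value that fits the
 * default max-user-freq limit):
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long data;
 *	struct hpet_info info;
 *
 *	ioctl(fd, HPET_IRQFREQ, 64);	   // request 64 interrupts per second
 *	ioctl(fd, HPET_EPI, 0);		   // periodic mode, if the comparator supports it
 *	ioctl(fd, HPET_IE_ON, 0);	   // arm the timer and request its IRQ
 *	read(fd, &data, sizeof(data));	   // blocks; returns interrupts since last read
 *	ioctl(fd, HPET_INFO, &info);	   // query timer index and flags
 *	ioctl(fd, HPET_IE_OFF, 0);	   // disarm and free the IRQ
 *	close(fd);
 */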