/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>
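/*
 * The register offsets below follow the memory-mapped Generic Timer
 * layout: CNTTIDR lives in the CNTCTLBase control region and advertises,
 * with one 4-bit field per frame, whether that frame implements a
 * virtual timer; the remaining offsets are relative to an individual
 * timer frame (CNTBaseN).
 */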
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static bool arch_timer_use_virtual = true;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline void timer_set_mode(const int access, int mode,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}

static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
}
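/*
 * The timer interrupt is level-triggered while the ISTATUS condition
 * holds, so timer_handler() above masks it (ARCH_TIMER_CTRL_IT_MASK)
 * rather than disabling the timer; set_next_event() below clears the
 * mask again when the next event is programmed.
 */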
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		if (arch_timer_use_virtual) {
			clk->irq = arch_timer_ppi[VIRT_PPI];
			clk->set_mode = arch_timer_set_mode_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
		} else {
			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
			clk->set_mode = arch_timer_set_mode_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
		}
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_mode = arch_timer_set_mode_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_mode = arch_timer_set_mode_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

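	/*
	 * Illustrative arithmetic, assuming a 50 MHz counter and the
	 * usual 10 kHz ARCH_TIMER_EVT_STREAM_FREQ: evt_stream_div = 5000
	 * and fls(5000) = 13, i.e. 2^13 = 8192 as a first guess. Bit 11
	 * of 5000 (0x1388) is clear, so 5000 is closer to 2^12 = 4096
	 * than to 8192 and pos is decremented to 12. The result is
	 * finally capped at 15 to fit the 4-bit trigger field in CNTKCTL.
	 */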
	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static int arch_timer_setup(struct clock_event_device *clk)
{
	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();
	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			arch_timer_use_virtual ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

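/*
 * A 64-bit MMIO counter cannot be read atomically with 32-bit accesses.
 * The function below uses the usual hi-lo-hi sequence: if the high word
 * changed while the low word was being read, the low word wrapped and
 * the read is retried.
 */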
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

		/* If the clocksource name is "arch_sys_counter" the
		 * VDSO will attempt to read the CP15-based counter.
		 * Ensure this does not happen when CP15-based
		 * counter is not available.
		 */
		clocksource_counter.name = "arch_mem_counter";
	}

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static int arch_timer_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
		break;
	case CPU_DYING:
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb = {
	.notifier_call = arch_timer_cpu_notify,
};

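/*
 * CNTKCTL is part of the per-cpu context lost when a core enters a deep
 * low-power state, so it is saved on CPU_PM_ENTER and restored on exit
 * (or on a failed entry).
 */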
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_unreg_notify:
	unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_probed(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool probed = true;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		probed = false;
	of_node_put(dn);

	return probed;
}

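/*
 * Both the cp15 and memory-mapped probe paths funnel into
 * arch_timer_common_init(); the clocksource is only registered once
 * every timer node present in the device tree has been probed.
 */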
static void __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (!arch_timer_probed(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return;
		if (!arch_timer_probed(ARCH_CP15_TIMER, arch_timer_of_match))
			return;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	arch_timer_arch_init();
}

static void __init arch_timer_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
	arch_timer_detect_rate(NULL, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_use_virtual = false;

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	arch_timer_register();
	arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);

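/*
 * For memory-mapped timers, pick a frame the CNTTIDR register reports
 * as virtual-capable if one exists, otherwise fall back to a physical
 * frame. Per the binding, interrupt 0 of a frame is the physical timer
 * interrupt and interrupt 1 the virtual one.
 */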
"virt" : "phys"); 798 return; 799 } 800 801 arch_timer_detect_rate(base, np); 802 arch_timer_mem_register(base, irq); 803 arch_timer_common_init(); 804 } 805 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 806 arch_timer_mem_init); 807