// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 */

#define pr_fmt(fmt)	"arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base __ro_after_init;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate __ro_after_init;
u32 arch_timer_rate1 __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;

static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
	[ARCH_TIMER_PHYS_SECURE_PPI]	= "sec-phys",
	[ARCH_TIMER_PHYS_NONSECURE_PPI]	= "phys",
	[ARCH_TIMER_VIRT_PPI]		= "virt",
	[ARCH_TIMER_HYP_PPI]		= "hyp-phys",
	[ARCH_TIMER_HYP_VIRT_PPI]	= "hyp-virt",
};

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
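 *
 * The code below drives either the per-CPU system register (CP15/sysreg)
 * timer or a memory-mapped timer frame; the ARCH_TIMER_*_ACCESS codes passed
 * to arch_timer_reg_write()/arch_timer_reg_read() select which of the two a
 * given clock_event_device uses.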
98 */ 99 100 static __always_inline 101 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 102 struct clock_event_device *clk) 103 { 104 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 105 struct arch_timer *timer = to_arch_timer(clk); 106 switch (reg) { 107 case ARCH_TIMER_REG_CTRL: 108 writel_relaxed(val, timer->base + CNTP_CTL); 109 break; 110 case ARCH_TIMER_REG_TVAL: 111 writel_relaxed(val, timer->base + CNTP_TVAL); 112 break; 113 } 114 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 115 struct arch_timer *timer = to_arch_timer(clk); 116 switch (reg) { 117 case ARCH_TIMER_REG_CTRL: 118 writel_relaxed(val, timer->base + CNTV_CTL); 119 break; 120 case ARCH_TIMER_REG_TVAL: 121 writel_relaxed(val, timer->base + CNTV_TVAL); 122 break; 123 } 124 } else { 125 arch_timer_reg_write_cp15(access, reg, val); 126 } 127 } 128 129 static __always_inline 130 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 131 struct clock_event_device *clk) 132 { 133 u32 val; 134 135 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 136 struct arch_timer *timer = to_arch_timer(clk); 137 switch (reg) { 138 case ARCH_TIMER_REG_CTRL: 139 val = readl_relaxed(timer->base + CNTP_CTL); 140 break; 141 case ARCH_TIMER_REG_TVAL: 142 val = readl_relaxed(timer->base + CNTP_TVAL); 143 break; 144 } 145 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 146 struct arch_timer *timer = to_arch_timer(clk); 147 switch (reg) { 148 case ARCH_TIMER_REG_CTRL: 149 val = readl_relaxed(timer->base + CNTV_CTL); 150 break; 151 case ARCH_TIMER_REG_TVAL: 152 val = readl_relaxed(timer->base + CNTV_TVAL); 153 break; 154 } 155 } else { 156 val = arch_timer_reg_read_cp15(access, reg); 157 } 158 159 return val; 160 } 161 162 static notrace u64 arch_counter_get_cntpct_stable(void) 163 { 164 return __arch_counter_get_cntpct_stable(); 165 } 166 167 static notrace u64 arch_counter_get_cntpct(void) 168 { 169 return __arch_counter_get_cntpct(); 170 } 171 172 static notrace u64 arch_counter_get_cntvct_stable(void) 173 { 174 return __arch_counter_get_cntvct_stable(); 175 } 176 177 static notrace u64 arch_counter_get_cntvct(void) 178 { 179 return __arch_counter_get_cntvct(); 180 } 181 182 /* 183 * Default to cp15 based access because arm64 uses this function for 184 * sched_clock() before DT is probed and the cp15 method is guaranteed 185 * to exist on arm64. arm doesn't use this before DT is probed so even 186 * if we don't have the cp15 accessors we won't have a problem. 187 */ 188 u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct; 189 EXPORT_SYMBOL_GPL(arch_timer_read_counter); 190 191 static u64 arch_counter_read(struct clocksource *cs) 192 { 193 return arch_timer_read_counter(); 194 } 195 196 static u64 arch_counter_read_cc(const struct cyclecounter *cc) 197 { 198 return arch_timer_read_counter(); 199 } 200 201 static struct clocksource clocksource_counter = { 202 .name = "arch_sys_counter", 203 .rating = 400, 204 .read = arch_counter_read, 205 .mask = CLOCKSOURCE_MASK(56), 206 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 207 }; 208 209 static struct cyclecounter cyclecounter __ro_after_init = { 210 .read = arch_counter_read_cc, 211 .mask = CLOCKSOURCE_MASK(56), 212 }; 213 214 struct ate_acpi_oem_info { 215 char oem_id[ACPI_OEM_ID_SIZE + 1]; 216 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 217 u32 oem_revision; 218 }; 219 220 #ifdef CONFIG_FSL_ERRATUM_A008585 221 /* 222 * The number of retries is an arbitrary value well beyond the highest number 223 * of iterations the loop has been observed to take. 
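 *
 * Each attempt performs two back-to-back reads of the register and only
 * accepts the value once both reads agree, roughly (illustrative sketch of
 * the macro below, which also warns if all 200 attempts are exhausted):
 *
 *	do {
 *		_old = read_sysreg(reg);
 *		_new = read_sysreg(reg);
 *	} while (_old != _new && --_retries);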
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second of two back-to-back reads is larger than the first by less than
 * 32, so clear the lower 5 bits and check whether the difference exceeds
 * 32. Theoretically the erratum should not occur more than twice in
 * succession when reading the system counter, but some interrupts may lead
 * to more than two read errors in a row and trigger the warning, so set
 * the number of retries far beyond the number of iterations the loop has
 * been observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over.
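 * (The check in the macro below treats a read whose low 10 bits are all
 * ones or all zeros as suspect: ((_val + 1) & GENMASK(9, 0)) evaluates to
 * 0 for a ...3ff value and to 1 for a ...000 value, so both are retried.)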
 * Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt =
erratum_set_next_event_tval_virt, 457 }, 458 { 459 .match_type = ate_match_acpi_oem_info, 460 .id = hisi_161010101_oem_info, 461 .desc = "HiSilicon erratum 161010101", 462 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, 463 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, 464 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, 465 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, 466 .set_next_event_phys = erratum_set_next_event_tval_phys, 467 .set_next_event_virt = erratum_set_next_event_tval_virt, 468 }, 469 #endif 470 #ifdef CONFIG_ARM64_ERRATUM_858921 471 { 472 .match_type = ate_match_local_cap_id, 473 .id = (void *)ARM64_WORKAROUND_858921, 474 .desc = "ARM erratum 858921", 475 .read_cntpct_el0 = arm64_858921_read_cntpct_el0, 476 .read_cntvct_el0 = arm64_858921_read_cntvct_el0, 477 }, 478 #endif 479 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 480 { 481 .match_type = ate_match_dt, 482 .id = "allwinner,erratum-unknown1", 483 .desc = "Allwinner erratum UNKNOWN1", 484 .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0, 485 .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0, 486 .read_cntpct_el0 = sun50i_a64_read_cntpct_el0, 487 .read_cntvct_el0 = sun50i_a64_read_cntvct_el0, 488 .set_next_event_phys = erratum_set_next_event_tval_phys, 489 .set_next_event_virt = erratum_set_next_event_tval_virt, 490 }, 491 #endif 492 #ifdef CONFIG_ARM64_ERRATUM_1418040 493 { 494 .match_type = ate_match_local_cap_id, 495 .id = (void *)ARM64_WORKAROUND_1418040, 496 .desc = "ARM erratum 1418040", 497 .disable_compat_vdso = true, 498 }, 499 #endif 500 }; 501 502 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, 503 const void *); 504 505 static 506 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa, 507 const void *arg) 508 { 509 const struct device_node *np = arg; 510 511 return of_property_read_bool(np, wa->id); 512 } 513 514 static 515 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa, 516 const void *arg) 517 { 518 return this_cpu_has_cap((uintptr_t)wa->id); 519 } 520 521 522 static 523 bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa, 524 const void *arg) 525 { 526 static const struct ate_acpi_oem_info empty_oem_info = {}; 527 const struct ate_acpi_oem_info *info = wa->id; 528 const struct acpi_table_header *table = arg; 529 530 /* Iterate over the ACPI OEM info array, looking for a match */ 531 while (memcmp(info, &empty_oem_info, sizeof(*info))) { 532 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && 533 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 534 info->oem_revision == table->oem_revision) 535 return true; 536 537 info++; 538 } 539 540 return false; 541 } 542 543 static const struct arch_timer_erratum_workaround * 544 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type, 545 ate_match_fn_t match_fn, 546 void *arg) 547 { 548 int i; 549 550 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) { 551 if (ool_workarounds[i].match_type != type) 552 continue; 553 554 if (match_fn(&ool_workarounds[i], arg)) 555 return &ool_workarounds[i]; 556 } 557 558 return NULL; 559 } 560 561 static 562 void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa, 563 bool local) 564 { 565 int i; 566 567 if (local) { 568 __this_cpu_write(timer_unstable_counter_workaround, wa); 569 } else { 570 for_each_possible_cpu(i) 571 per_cpu(timer_unstable_counter_workaround, i) = wa; 572 } 573 574 if (wa->read_cntvct_el0 
	    || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.vdso_clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
			wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#define arch_timer_counter_has_wa()			({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
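	/* Clearing the ENABLE bit stops the timer; set_next_event() re-arms it. */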
694 unsigned long ctrl; 695 696 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 697 ctrl &= ~ARCH_TIMER_CTRL_ENABLE; 698 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 699 700 return 0; 701 } 702 703 static int arch_timer_shutdown_virt(struct clock_event_device *clk) 704 { 705 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk); 706 } 707 708 static int arch_timer_shutdown_phys(struct clock_event_device *clk) 709 { 710 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk); 711 } 712 713 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk) 714 { 715 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk); 716 } 717 718 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk) 719 { 720 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk); 721 } 722 723 static __always_inline void set_next_event(const int access, unsigned long evt, 724 struct clock_event_device *clk) 725 { 726 unsigned long ctrl; 727 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 728 ctrl |= ARCH_TIMER_CTRL_ENABLE; 729 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 730 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); 731 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 732 } 733 734 static int arch_timer_set_next_event_virt(unsigned long evt, 735 struct clock_event_device *clk) 736 { 737 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); 738 return 0; 739 } 740 741 static int arch_timer_set_next_event_phys(unsigned long evt, 742 struct clock_event_device *clk) 743 { 744 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 745 return 0; 746 } 747 748 static int arch_timer_set_next_event_virt_mem(unsigned long evt, 749 struct clock_event_device *clk) 750 { 751 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); 752 return 0; 753 } 754 755 static int arch_timer_set_next_event_phys_mem(unsigned long evt, 756 struct clock_event_device *clk) 757 { 758 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); 759 return 0; 760 } 761 762 static void __arch_timer_setup(unsigned type, 763 struct clock_event_device *clk) 764 { 765 clk->features = CLOCK_EVT_FEAT_ONESHOT; 766 767 if (type == ARCH_TIMER_TYPE_CP15) { 768 typeof(clk->set_next_event) sne; 769 770 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); 771 772 if (arch_timer_c3stop) 773 clk->features |= CLOCK_EVT_FEAT_C3STOP; 774 clk->name = "arch_sys_timer"; 775 clk->rating = 450; 776 clk->cpumask = cpumask_of(smp_processor_id()); 777 clk->irq = arch_timer_ppi[arch_timer_uses_ppi]; 778 switch (arch_timer_uses_ppi) { 779 case ARCH_TIMER_VIRT_PPI: 780 clk->set_state_shutdown = arch_timer_shutdown_virt; 781 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; 782 sne = erratum_handler(set_next_event_virt); 783 break; 784 case ARCH_TIMER_PHYS_SECURE_PPI: 785 case ARCH_TIMER_PHYS_NONSECURE_PPI: 786 case ARCH_TIMER_HYP_PPI: 787 clk->set_state_shutdown = arch_timer_shutdown_phys; 788 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; 789 sne = erratum_handler(set_next_event_phys); 790 break; 791 default: 792 BUG(); 793 } 794 795 clk->set_next_event = sne; 796 } else { 797 clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 798 clk->name = "arch_mem_timer"; 799 clk->rating = 400; 800 clk->cpumask = cpu_possible_mask; 801 if (arch_timer_mem_use_virtual) { 802 clk->set_state_shutdown = arch_timer_shutdown_virt_mem; 803 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; 804 clk->set_next_event = 805 arch_timer_set_next_event_virt_mem; 806 } else { 807 clk->set_state_shutdown = 
				arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
	 * For example, evt_stream_div = 5000 gives lsb = 12, while 7000
	 * rounds up to lsb = 13 because bit 11 is also set.
	 */
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have been already
	 * disabled though.
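	 * When the counter does need a workaround, user access stays disabled
	 * and, on arm64, EL0 reads of CNTVCT trap to the kernel, which can
	 * service them with the erratum-aware accessor (hence the
	 * "Trapping CNTVCT access" message below).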
870 */ 871 if (arch_timer_this_cpu_has_cntvct_wa()) 872 pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id()); 873 else 874 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; 875 876 arch_timer_set_cntkctl(cntkctl); 877 } 878 879 static bool arch_timer_has_nonsecure_ppi(void) 880 { 881 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI && 882 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 883 } 884 885 static u32 check_ppi_trigger(int irq) 886 { 887 u32 flags = irq_get_trigger_type(irq); 888 889 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) { 890 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq); 891 pr_warn("WARNING: Please fix your firmware\n"); 892 flags = IRQF_TRIGGER_LOW; 893 } 894 895 return flags; 896 } 897 898 static int arch_timer_starting_cpu(unsigned int cpu) 899 { 900 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 901 u32 flags; 902 903 __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk); 904 905 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); 906 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); 907 908 if (arch_timer_has_nonsecure_ppi()) { 909 flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 910 enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 911 flags); 912 } 913 914 arch_counter_set_user_access(); 915 if (evtstrm_enable) 916 arch_timer_configure_evtstream(); 917 918 return 0; 919 } 920 921 static int validate_timer_rate(void) 922 { 923 if (!arch_timer_rate) 924 return -EINVAL; 925 926 /* Arch timer frequency < 1MHz can cause trouble */ 927 WARN_ON(arch_timer_rate < 1000000); 928 929 return 0; 930 } 931 932 /* 933 * For historical reasons, when probing with DT we use whichever (non-zero) 934 * rate was probed first, and don't verify that others match. If the first node 935 * probed has a clock-frequency property, this overrides the HW register. 936 */ 937 static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np) 938 { 939 /* Who has more than one independent system counter? */ 940 if (arch_timer_rate) 941 return; 942 943 if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) 944 arch_timer_rate = rate; 945 946 /* Check the timer frequency. */ 947 if (validate_timer_rate()) 948 pr_warn("frequency not available\n"); 949 } 950 951 static void __init arch_timer_banner(unsigned type) 952 { 953 pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 954 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", 955 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? 956 " and " : "", 957 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "", 958 (unsigned long)arch_timer_rate / 1000000, 959 (unsigned long)(arch_timer_rate / 10000) % 100, 960 type & ARCH_TIMER_TYPE_CP15 ? 961 (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" : 962 "", 963 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "", 964 type & ARCH_TIMER_TYPE_MEM ? 965 arch_timer_mem_use_virtual ? "virt" : "phys" : 966 ""); 967 } 968 969 u32 arch_timer_get_rate(void) 970 { 971 return arch_timer_rate; 972 } 973 974 bool arch_timer_evtstrm_available(void) 975 { 976 /* 977 * We might get called from a preemptible context. This is fine 978 * because availability of the event stream should be always the same 979 * for a preemptible context and context where we might resume a task. 
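 * (evtstrm_available is only updated from the CPU hotplug starting/dying
 * callbacks and the CPU PM notifier below, so whichever CPU the check ends
 * up running on gives the same answer.)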
980 */ 981 return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); 982 } 983 984 static u64 arch_counter_get_cntvct_mem(void) 985 { 986 u32 vct_lo, vct_hi, tmp_hi; 987 988 do { 989 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 990 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); 991 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 992 } while (vct_hi != tmp_hi); 993 994 return ((u64) vct_hi << 32) | vct_lo; 995 } 996 997 static struct arch_timer_kvm_info arch_timer_kvm_info; 998 999 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) 1000 { 1001 return &arch_timer_kvm_info; 1002 } 1003 1004 static void __init arch_counter_register(unsigned type) 1005 { 1006 u64 start_count; 1007 1008 /* Register the CP15 based counter if we have one */ 1009 if (type & ARCH_TIMER_TYPE_CP15) { 1010 u64 (*rd)(void); 1011 1012 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || 1013 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { 1014 if (arch_timer_counter_has_wa()) 1015 rd = arch_counter_get_cntvct_stable; 1016 else 1017 rd = arch_counter_get_cntvct; 1018 } else { 1019 if (arch_timer_counter_has_wa()) 1020 rd = arch_counter_get_cntpct_stable; 1021 else 1022 rd = arch_counter_get_cntpct; 1023 } 1024 1025 arch_timer_read_counter = rd; 1026 clocksource_counter.vdso_clock_mode = vdso_default; 1027 } else { 1028 arch_timer_read_counter = arch_counter_get_cntvct_mem; 1029 } 1030 1031 if (!arch_counter_suspend_stop) 1032 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; 1033 start_count = arch_timer_read_counter(); 1034 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 1035 cyclecounter.mult = clocksource_counter.mult; 1036 cyclecounter.shift = clocksource_counter.shift; 1037 timecounter_init(&arch_timer_kvm_info.timecounter, 1038 &cyclecounter, start_count); 1039 1040 /* 56 bits minimum, so we assume worst case rollover */ 1041 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate); 1042 } 1043 1044 static void arch_timer_stop(struct clock_event_device *clk) 1045 { 1046 pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); 1047 1048 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]); 1049 if (arch_timer_has_nonsecure_ppi()) 1050 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 1051 1052 clk->set_state_shutdown(clk); 1053 } 1054 1055 static int arch_timer_dying_cpu(unsigned int cpu) 1056 { 1057 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 1058 1059 cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); 1060 1061 arch_timer_stop(clk); 1062 return 0; 1063 } 1064 1065 #ifdef CONFIG_CPU_PM 1066 static DEFINE_PER_CPU(unsigned long, saved_cntkctl); 1067 static int arch_timer_cpu_pm_notify(struct notifier_block *self, 1068 unsigned long action, void *hcpu) 1069 { 1070 if (action == CPU_PM_ENTER) { 1071 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl()); 1072 1073 cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); 1074 } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { 1075 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); 1076 1077 if (arch_timer_have_evtstrm_feature()) 1078 cpumask_set_cpu(smp_processor_id(), &evtstrm_available); 1079 } 1080 return NOTIFY_OK; 1081 } 1082 1083 static struct notifier_block arch_timer_cpu_pm_notifier = { 1084 .notifier_call = arch_timer_cpu_pm_notify, 1085 }; 1086 1087 static int __init arch_timer_cpu_pm_init(void) 1088 { 1089 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); 1090 } 1091 1092 static 
void __init arch_timer_cpu_pm_deinit(void) 1093 { 1094 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); 1095 } 1096 1097 #else 1098 static int __init arch_timer_cpu_pm_init(void) 1099 { 1100 return 0; 1101 } 1102 1103 static void __init arch_timer_cpu_pm_deinit(void) 1104 { 1105 } 1106 #endif 1107 1108 static int __init arch_timer_register(void) 1109 { 1110 int err; 1111 int ppi; 1112 1113 arch_timer_evt = alloc_percpu(struct clock_event_device); 1114 if (!arch_timer_evt) { 1115 err = -ENOMEM; 1116 goto out; 1117 } 1118 1119 ppi = arch_timer_ppi[arch_timer_uses_ppi]; 1120 switch (arch_timer_uses_ppi) { 1121 case ARCH_TIMER_VIRT_PPI: 1122 err = request_percpu_irq(ppi, arch_timer_handler_virt, 1123 "arch_timer", arch_timer_evt); 1124 break; 1125 case ARCH_TIMER_PHYS_SECURE_PPI: 1126 case ARCH_TIMER_PHYS_NONSECURE_PPI: 1127 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1128 "arch_timer", arch_timer_evt); 1129 if (!err && arch_timer_has_nonsecure_ppi()) { 1130 ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; 1131 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1132 "arch_timer", arch_timer_evt); 1133 if (err) 1134 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI], 1135 arch_timer_evt); 1136 } 1137 break; 1138 case ARCH_TIMER_HYP_PPI: 1139 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1140 "arch_timer", arch_timer_evt); 1141 break; 1142 default: 1143 BUG(); 1144 } 1145 1146 if (err) { 1147 pr_err("can't register interrupt %d (%d)\n", ppi, err); 1148 goto out_free; 1149 } 1150 1151 err = arch_timer_cpu_pm_init(); 1152 if (err) 1153 goto out_unreg_notify; 1154 1155 /* Register and immediately configure the timer on the boot CPU */ 1156 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, 1157 "clockevents/arm/arch_timer:starting", 1158 arch_timer_starting_cpu, arch_timer_dying_cpu); 1159 if (err) 1160 goto out_unreg_cpupm; 1161 return 0; 1162 1163 out_unreg_cpupm: 1164 arch_timer_cpu_pm_deinit(); 1165 1166 out_unreg_notify: 1167 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); 1168 if (arch_timer_has_nonsecure_ppi()) 1169 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 1170 arch_timer_evt); 1171 1172 out_free: 1173 free_percpu(arch_timer_evt); 1174 out: 1175 return err; 1176 } 1177 1178 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) 1179 { 1180 int ret; 1181 irq_handler_t func; 1182 struct arch_timer *t; 1183 1184 t = kzalloc(sizeof(*t), GFP_KERNEL); 1185 if (!t) 1186 return -ENOMEM; 1187 1188 t->base = base; 1189 t->evt.irq = irq; 1190 __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt); 1191 1192 if (arch_timer_mem_use_virtual) 1193 func = arch_timer_handler_virt_mem; 1194 else 1195 func = arch_timer_handler_phys_mem; 1196 1197 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); 1198 if (ret) { 1199 pr_err("Failed to request mem timer irq\n"); 1200 kfree(t); 1201 } 1202 1203 return ret; 1204 } 1205 1206 static const struct of_device_id arch_timer_of_match[] __initconst = { 1207 { .compatible = "arm,armv7-timer", }, 1208 { .compatible = "arm,armv8-timer", }, 1209 {}, 1210 }; 1211 1212 static const struct of_device_id arch_timer_mem_of_match[] __initconst = { 1213 { .compatible = "arm,armv7-timer-mem", }, 1214 {}, 1215 }; 1216 1217 static bool __init arch_timer_needs_of_probing(void) 1218 { 1219 struct device_node *dn; 1220 bool needs_probing = false; 1221 unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM; 1222 1223 /* We have two timers, and 
both device-tree nodes are probed. */ 1224 if ((arch_timers_present & mask) == mask) 1225 return false; 1226 1227 /* 1228 * Only one type of timer is probed, 1229 * check if we have another type of timer node in device-tree. 1230 */ 1231 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) 1232 dn = of_find_matching_node(NULL, arch_timer_mem_of_match); 1233 else 1234 dn = of_find_matching_node(NULL, arch_timer_of_match); 1235 1236 if (dn && of_device_is_available(dn)) 1237 needs_probing = true; 1238 1239 of_node_put(dn); 1240 1241 return needs_probing; 1242 } 1243 1244 static int __init arch_timer_common_init(void) 1245 { 1246 arch_timer_banner(arch_timers_present); 1247 arch_counter_register(arch_timers_present); 1248 return arch_timer_arch_init(); 1249 } 1250 1251 /** 1252 * arch_timer_select_ppi() - Select suitable PPI for the current system. 1253 * 1254 * If HYP mode is available, we know that the physical timer 1255 * has been configured to be accessible from PL1. Use it, so 1256 * that a guest can use the virtual timer instead. 1257 * 1258 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE 1259 * accesses to CNTP_*_EL1 registers are silently redirected to 1260 * their CNTHP_*_EL2 counterparts, and use a different PPI 1261 * number. 1262 * 1263 * If no interrupt provided for virtual timer, we'll have to 1264 * stick to the physical timer. It'd better be accessible... 1265 * For arm64 we never use the secure interrupt. 1266 * 1267 * Return: a suitable PPI type for the current system. 1268 */ 1269 static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) 1270 { 1271 if (is_kernel_in_hyp_mode()) 1272 return ARCH_TIMER_HYP_PPI; 1273 1274 if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI]) 1275 return ARCH_TIMER_VIRT_PPI; 1276 1277 if (IS_ENABLED(CONFIG_ARM64)) 1278 return ARCH_TIMER_PHYS_NONSECURE_PPI; 1279 1280 return ARCH_TIMER_PHYS_SECURE_PPI; 1281 } 1282 1283 static void __init arch_timer_populate_kvm_info(void) 1284 { 1285 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 1286 if (is_kernel_in_hyp_mode()) 1287 arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; 1288 } 1289 1290 static int __init arch_timer_of_init(struct device_node *np) 1291 { 1292 int i, irq, ret; 1293 u32 rate; 1294 bool has_names; 1295 1296 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1297 pr_warn("multiple nodes in dt, skipping\n"); 1298 return 0; 1299 } 1300 1301 arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1302 1303 has_names = of_property_read_bool(np, "interrupt-names"); 1304 1305 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) { 1306 if (has_names) 1307 irq = of_irq_get_byname(np, arch_timer_ppi_names[i]); 1308 else 1309 irq = of_irq_get(np, i); 1310 if (irq > 0) 1311 arch_timer_ppi[i] = irq; 1312 } 1313 1314 arch_timer_populate_kvm_info(); 1315 1316 rate = arch_timer_get_cntfrq(); 1317 arch_timer_of_configure_rate(rate, np); 1318 1319 arch_timer_c3stop = !of_property_read_bool(np, "always-on"); 1320 1321 /* Check for globally applicable workarounds */ 1322 arch_timer_check_ool_workaround(ate_match_dt, np); 1323 1324 /* 1325 * If we cannot rely on firmware initializing the timer registers then 1326 * we should use the physical timers instead. 
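 * (Such platforms mark this with the "arm,cpu-registers-not-fw-configured"
 * DT property; the check below only honours it on 32-bit ARM.)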
1327 */ 1328 if (IS_ENABLED(CONFIG_ARM) && 1329 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) 1330 arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI; 1331 else 1332 arch_timer_uses_ppi = arch_timer_select_ppi(); 1333 1334 if (!arch_timer_ppi[arch_timer_uses_ppi]) { 1335 pr_err("No interrupt available, giving up\n"); 1336 return -EINVAL; 1337 } 1338 1339 /* On some systems, the counter stops ticking when in suspend. */ 1340 arch_counter_suspend_stop = of_property_read_bool(np, 1341 "arm,no-tick-in-suspend"); 1342 1343 ret = arch_timer_register(); 1344 if (ret) 1345 return ret; 1346 1347 if (arch_timer_needs_of_probing()) 1348 return 0; 1349 1350 return arch_timer_common_init(); 1351 } 1352 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); 1353 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); 1354 1355 static u32 __init 1356 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) 1357 { 1358 void __iomem *base; 1359 u32 rate; 1360 1361 base = ioremap(frame->cntbase, frame->size); 1362 if (!base) { 1363 pr_err("Unable to map frame @ %pa\n", &frame->cntbase); 1364 return 0; 1365 } 1366 1367 rate = readl_relaxed(base + CNTFRQ); 1368 1369 iounmap(base); 1370 1371 return rate; 1372 } 1373 1374 static struct arch_timer_mem_frame * __init 1375 arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) 1376 { 1377 struct arch_timer_mem_frame *frame, *best_frame = NULL; 1378 void __iomem *cntctlbase; 1379 u32 cnttidr; 1380 int i; 1381 1382 cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size); 1383 if (!cntctlbase) { 1384 pr_err("Can't map CNTCTLBase @ %pa\n", 1385 &timer_mem->cntctlbase); 1386 return NULL; 1387 } 1388 1389 cnttidr = readl_relaxed(cntctlbase + CNTTIDR); 1390 1391 /* 1392 * Try to find a virtual capable frame. Otherwise fall back to a 1393 * physical capable frame. 1394 */ 1395 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1396 u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | 1397 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; 1398 1399 frame = &timer_mem->frame[i]; 1400 if (!frame->valid) 1401 continue; 1402 1403 /* Try enabling everything, and see what sticks */ 1404 writel_relaxed(cntacr, cntctlbase + CNTACR(i)); 1405 cntacr = readl_relaxed(cntctlbase + CNTACR(i)); 1406 1407 if ((cnttidr & CNTTIDR_VIRT(i)) && 1408 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { 1409 best_frame = frame; 1410 arch_timer_mem_use_virtual = true; 1411 break; 1412 } 1413 1414 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) 1415 continue; 1416 1417 best_frame = frame; 1418 } 1419 1420 iounmap(cntctlbase); 1421 1422 return best_frame; 1423 } 1424 1425 static int __init 1426 arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) 1427 { 1428 void __iomem *base; 1429 int ret, irq = 0; 1430 1431 if (arch_timer_mem_use_virtual) 1432 irq = frame->virt_irq; 1433 else 1434 irq = frame->phys_irq; 1435 1436 if (!irq) { 1437 pr_err("Frame missing %s irq.\n", 1438 arch_timer_mem_use_virtual ? 
"virt" : "phys"); 1439 return -EINVAL; 1440 } 1441 1442 if (!request_mem_region(frame->cntbase, frame->size, 1443 "arch_mem_timer")) 1444 return -EBUSY; 1445 1446 base = ioremap(frame->cntbase, frame->size); 1447 if (!base) { 1448 pr_err("Can't map frame's registers\n"); 1449 return -ENXIO; 1450 } 1451 1452 ret = arch_timer_mem_register(base, irq); 1453 if (ret) { 1454 iounmap(base); 1455 return ret; 1456 } 1457 1458 arch_counter_base = base; 1459 arch_timers_present |= ARCH_TIMER_TYPE_MEM; 1460 1461 return 0; 1462 } 1463 1464 static int __init arch_timer_mem_of_init(struct device_node *np) 1465 { 1466 struct arch_timer_mem *timer_mem; 1467 struct arch_timer_mem_frame *frame; 1468 struct device_node *frame_node; 1469 struct resource res; 1470 int ret = -EINVAL; 1471 u32 rate; 1472 1473 timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL); 1474 if (!timer_mem) 1475 return -ENOMEM; 1476 1477 if (of_address_to_resource(np, 0, &res)) 1478 goto out; 1479 timer_mem->cntctlbase = res.start; 1480 timer_mem->size = resource_size(&res); 1481 1482 for_each_available_child_of_node(np, frame_node) { 1483 u32 n; 1484 struct arch_timer_mem_frame *frame; 1485 1486 if (of_property_read_u32(frame_node, "frame-number", &n)) { 1487 pr_err(FW_BUG "Missing frame-number.\n"); 1488 of_node_put(frame_node); 1489 goto out; 1490 } 1491 if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { 1492 pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", 1493 ARCH_TIMER_MEM_MAX_FRAMES - 1); 1494 of_node_put(frame_node); 1495 goto out; 1496 } 1497 frame = &timer_mem->frame[n]; 1498 1499 if (frame->valid) { 1500 pr_err(FW_BUG "Duplicated frame-number.\n"); 1501 of_node_put(frame_node); 1502 goto out; 1503 } 1504 1505 if (of_address_to_resource(frame_node, 0, &res)) { 1506 of_node_put(frame_node); 1507 goto out; 1508 } 1509 frame->cntbase = res.start; 1510 frame->size = resource_size(&res); 1511 1512 frame->virt_irq = irq_of_parse_and_map(frame_node, 1513 ARCH_TIMER_VIRT_SPI); 1514 frame->phys_irq = irq_of_parse_and_map(frame_node, 1515 ARCH_TIMER_PHYS_SPI); 1516 1517 frame->valid = true; 1518 } 1519 1520 frame = arch_timer_mem_find_best_frame(timer_mem); 1521 if (!frame) { 1522 pr_err("Unable to find a suitable frame in timer @ %pa\n", 1523 &timer_mem->cntctlbase); 1524 ret = -EINVAL; 1525 goto out; 1526 } 1527 1528 rate = arch_timer_mem_frame_get_cntfrq(frame); 1529 arch_timer_of_configure_rate(rate, np); 1530 1531 ret = arch_timer_mem_frame_register(frame); 1532 if (!ret && !arch_timer_needs_of_probing()) 1533 ret = arch_timer_common_init(); 1534 out: 1535 kfree(timer_mem); 1536 return ret; 1537 } 1538 TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 1539 arch_timer_mem_of_init); 1540 1541 #ifdef CONFIG_ACPI_GTDT 1542 static int __init 1543 arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) 1544 { 1545 struct arch_timer_mem_frame *frame; 1546 u32 rate; 1547 int i; 1548 1549 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1550 frame = &timer_mem->frame[i]; 1551 1552 if (!frame->valid) 1553 continue; 1554 1555 rate = arch_timer_mem_frame_get_cntfrq(frame); 1556 if (rate == arch_timer_rate) 1557 continue; 1558 1559 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n", 1560 &frame->cntbase, 1561 (unsigned long)rate, (unsigned long)arch_timer_rate); 1562 1563 return -EINVAL; 1564 } 1565 1566 return 0; 1567 } 1568 1569 static int __init arch_timer_mem_acpi_init(int platform_timer_count) 1570 { 1571 struct arch_timer_mem *timers, *timer; 1572 struct arch_timer_mem_frame *frame, *best_frame 
= NULL; 1573 int timer_count, i, ret = 0; 1574 1575 timers = kcalloc(platform_timer_count, sizeof(*timers), 1576 GFP_KERNEL); 1577 if (!timers) 1578 return -ENOMEM; 1579 1580 ret = acpi_arch_timer_mem_init(timers, &timer_count); 1581 if (ret || !timer_count) 1582 goto out; 1583 1584 /* 1585 * While unlikely, it's theoretically possible that none of the frames 1586 * in a timer expose the combination of feature we want. 1587 */ 1588 for (i = 0; i < timer_count; i++) { 1589 timer = &timers[i]; 1590 1591 frame = arch_timer_mem_find_best_frame(timer); 1592 if (!best_frame) 1593 best_frame = frame; 1594 1595 ret = arch_timer_mem_verify_cntfrq(timer); 1596 if (ret) { 1597 pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1598 goto out; 1599 } 1600 1601 if (!best_frame) /* implies !frame */ 1602 /* 1603 * Only complain about missing suitable frames if we 1604 * haven't already found one in a previous iteration. 1605 */ 1606 pr_err("Unable to find a suitable frame in timer @ %pa\n", 1607 &timer->cntctlbase); 1608 } 1609 1610 if (best_frame) 1611 ret = arch_timer_mem_frame_register(best_frame); 1612 out: 1613 kfree(timers); 1614 return ret; 1615 } 1616 1617 /* Initialize per-processor generic timer and memory-mapped timer(if present) */ 1618 static int __init arch_timer_acpi_init(struct acpi_table_header *table) 1619 { 1620 int ret, platform_timer_count; 1621 1622 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1623 pr_warn("already initialized, skipping\n"); 1624 return -EINVAL; 1625 } 1626 1627 arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1628 1629 ret = acpi_gtdt_init(table, &platform_timer_count); 1630 if (ret) 1631 return ret; 1632 1633 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] = 1634 acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI); 1635 1636 arch_timer_ppi[ARCH_TIMER_VIRT_PPI] = 1637 acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI); 1638 1639 arch_timer_ppi[ARCH_TIMER_HYP_PPI] = 1640 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); 1641 1642 arch_timer_populate_kvm_info(); 1643 1644 /* 1645 * When probing via ACPI, we have no mechanism to override the sysreg 1646 * CNTFRQ value. This *must* be correct. 1647 */ 1648 arch_timer_rate = arch_timer_get_cntfrq(); 1649 ret = validate_timer_rate(); 1650 if (ret) { 1651 pr_err(FW_BUG "frequency not available.\n"); 1652 return ret; 1653 } 1654 1655 arch_timer_uses_ppi = arch_timer_select_ppi(); 1656 if (!arch_timer_ppi[arch_timer_uses_ppi]) { 1657 pr_err("No interrupt available, giving up\n"); 1658 return -EINVAL; 1659 } 1660 1661 /* Always-on capability */ 1662 arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi); 1663 1664 /* Check for globally applicable workarounds */ 1665 arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table); 1666 1667 ret = arch_timer_register(); 1668 if (ret) 1669 return ret; 1670 1671 if (platform_timer_count && 1672 arch_timer_mem_acpi_init(platform_timer_count)) 1673 pr_err("Failed to initialize memory-mapped timer.\n"); 1674 1675 return arch_timer_common_init(); 1676 } 1677 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); 1678 #endif 1679
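
/*
 * Probe entry points: the DT path is wired up through the TIMER_OF_DECLARE()
 * statements above ("arm,armv7-timer", "arm,armv8-timer" and
 * "arm,armv7-timer-mem"), the ACPI path through TIMER_ACPI_DECLARE() on the
 * GTDT table.
 */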