// SPDX-License-Identifier: GPL-2.0-only
/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Exynos4 MCT(Multi-Core Timer) support
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

#ifdef CONFIG_ARM
/* Use values higher than ARM arch timer. See 6282edb72bed. */
#define MCT_CLKSOURCE_RATING		450
#define MCT_CLKEVENTS_RATING		500
#else
#define MCT_CLKSOURCE_RATING		350
#define MCT_CLKEVENTS_RATING		350
#endif

/* There are four Global timers starting with 0 offset */
#define MCT_G0_IRQ	0
/* Local timers count starts after global timer count */
#define MCT_L0_IRQ	4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS	20
/* Max number of local timers */
#define MCT_NR_LOCAL	(MCT_NR_IRQS - MCT_L0_IRQ)

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device evt;
	unsigned long base;
	/*
	 * The length of the name must be adjusted if the number of
	 * local timer interrupts grows over two digits.
	 */
	char name[11];
};

static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	writel_relaxed(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			writel_relaxed(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
	u32 reg;

	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match. Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
{
	unsigned int lo, hi;
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}

/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter. This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

static u64 exynos4_frc_read(struct clocksource *cs)
{
	return exynos4_read_count_32();
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start();
}

static struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= MCT_CLKSOURCE_RATING,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static u64 notrace exynos4_read_sched_clock(void)
{
	return exynos4_read_count_32();
}

#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
}
#endif

static int __init exynos4_clocksource_init(bool frc_shared)
{
	/*
	 * When the frc is shared, the main processor should have already
	 * turned it on and we shouldn't be writing to TCON.
	 */
	if (frc_shared)
		mct_frc.resume = NULL;
	else
		exynos4_mct_frc_start();

#if defined(CONFIG_ARM)
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);
#endif

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);

	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);

	return 0;
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
	unsigned int tcon;
	u64 comp_cycle;

	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

	if (periodic) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_read_count_64() + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(false, cycles);

	return 0;
}

static int mct_set_state_shutdown(struct clock_event_device *evt)
{
	exynos4_mct_comp0_stop();
	return 0;
}

static int mct_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_comp0_stop();
	exynos4_mct_comp0_start(true, cycles_per_jiffy);
	return 0;
}

static struct clock_event_device mct_comp_device = {
	.name			= "mct-comp",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 250,
	.set_next_event		= exynos4_comp_set_next_event,
	.set_state_periodic	= mct_set_state_periodic,
	.set_state_shutdown	= mct_set_state_shutdown,
	.set_state_oneshot	= mct_set_state_shutdown,
	.set_state_oneshot_stopped = mct_set_state_shutdown,
	.tick_resume		= mct_set_state_shutdown,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
			IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
			&mct_comp_device))
		pr_err("%s: request_irq() failed\n", "mct_comp_irq");

	return 0;
}

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
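/*
 * Each possible CPU owns one local MCT timer, run in interval mode: TCNTB
 * stays at TICK_BASE_CNT while ICNTB is (re)loaded with the number of
 * intervals to count before raising the interrupt, which is why each
 * per-CPU clockevent below is registered at clk_rate / (TICK_BASE_CNT + 1).
 */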
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = readl_relaxed(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}

static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	/* Clear the MCT tick interrupt */
	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_start(cycles, mevt);
	return 0;
}

static int set_state_shutdown(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_clear(mevt);
	return 0;
}

static int set_state_periodic(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned long cycles_per_jiffy;

	mevt = container_of(evt, struct mct_clock_event_device, evt);
	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
			    >> evt->shift);
	exynos4_mct_tick_stop(mevt);
	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
	return 0;
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * The MCT would generate interrupts periodically
	 * without explicit stopping.
	 */
	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int exynos4_mct_starting_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_state_periodic = set_state_periodic;
	evt->set_state_shutdown = set_state_shutdown;
	evt->set_state_oneshot = set_state_shutdown;
	evt->set_state_oneshot_stopped = set_state_shutdown;
	evt->tick_resume = set_state_shutdown;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_PERCPU;
	evt->rating = MCT_CLKEVENTS_RATING;

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {

		if (evt->irq == -1)
			return -EIO;

		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	return 0;
}

static int exynos4_mct_dying_cpu(unsigned int cpu)
{
	struct mct_clock_event_device *mevt =
		per_cpu_ptr(&percpu_mct_tick, cpu);
	struct clock_event_device *evt = &mevt->evt;

	if (mct_int_type == MCT_INT_SPI) {
		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
	return 0;
}

static int __init exynos4_timer_resources(struct device_node *np)
{
	struct clk *mct_clk, *tick_clk;

	reg_base = of_iomap(np, 0);
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	tick_clk = of_clk_get_by_name(np, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = of_clk_get_by_name(np, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	return 0;
}

/**
 * exynos4_timer_interrupts - initialize MCT interrupts
 * @np: device node for MCT
 * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
 * @local_idx: array mapping CPU numbers to local timer indices
 * @nr_local: size of @local_idx array
 */
static int __init exynos4_timer_interrupts(struct device_node *np,
					   unsigned int int_type,
					   const u32 *local_idx,
					   size_t nr_local)
{
	int nr_irqs, i, err, cpu;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
	nr_irqs = of_irq_count(np);
	if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
		pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
			nr_irqs);
		nr_irqs = ARRAY_SIZE(mct_irqs);
	}
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	if (mct_int_type == MCT_INT_PPI) {

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		for_each_possible_cpu(cpu) {
			int mct_irq;
			unsigned int irq_idx;
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (cpu >= nr_local) {
				err = -EINVAL;
				goto out_irq;
			}

			irq_idx = MCT_L0_IRQ + local_idx[cpu];

			pcpu_mevt->evt.irq = -1;
			if (irq_idx >= ARRAY_SIZE(mct_irqs))
				break;
			mct_irq = mct_irqs[irq_idx];

			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
			if (request_irq(mct_irq,
					exynos4_mct_tick_isr,
					IRQF_TIMER | IRQF_NOBALANCING,
					pcpu_mevt->name, pcpu_mevt)) {
				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
				       cpu);

				continue;
			}
			pcpu_mevt->evt.irq = mct_irq;
		}
	}

	for_each_possible_cpu(cpu) {
		struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);

		if (cpu >= nr_local) {
			err = -EINVAL;
			goto out_irq;
		}

		mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
	}

	/* Install hotplug callbacks which configure the timer on this CPU */
	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
				"clockevents/exynos4/mct_timer:starting",
				exynos4_mct_starting_cpu,
				exynos4_mct_dying_cpu);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	if (mct_int_type == MCT_INT_PPI) {
		free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
	} else {
		for_each_possible_cpu(cpu) {
			struct mct_clock_event_device *pcpu_mevt =
				per_cpu_ptr(&percpu_mct_tick, cpu);

			if (pcpu_mevt->evt.irq != -1) {
				free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
				pcpu_mevt->evt.irq = -1;
			}
		}
	}
	return err;
}

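/*
 * The optional "samsung,local-timers" DT property remaps CPU numbers to
 * local timer indices; when it is absent, CPU i simply uses local timer i.
 */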
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
	u32 local_idx[MCT_NR_LOCAL] = {0};
	int nr_local;
	int ret;

	nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
	if (nr_local == 0)
		return -EINVAL;
	if (nr_local > 0) {
		if (nr_local > ARRAY_SIZE(local_idx))
			return -EINVAL;

		ret = of_property_read_u32_array(np, "samsung,local-timers",
						 local_idx, nr_local);
		if (ret)
			return ret;
	} else {
		int i;

		nr_local = ARRAY_SIZE(local_idx);
		for (i = 0; i < nr_local; i++)
			local_idx[i] = i;
	}

	ret = exynos4_timer_resources(np);
	if (ret)
		return ret;

	ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
	if (ret)
		return ret;

	ret = exynos4_clocksource_init(frc_shared);
	if (ret)
		return ret;

	/*
	 * When the FRC is shared with a main processor, this secondary
	 * processor cannot use the global comparator.
	 */
	if (frc_shared)
		return 0;

	return exynos4_clockevent_init();
}

static int __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static int __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);