// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 * - Two channels combine to create a free-running 32 bit counter
 *   with a base rate of 5+ MHz, packaged as a clocksource (with
 *   resolution better than 200 nsec).
 * - Some chips support a 32 bit counter. A single channel is used for
 *   this 32 bit free-running counter; the second channel is not used.
 *
 * - The third channel may be used to provide a clockevent source, used
 *   in either periodic or oneshot mode. For 16-bit counters it runs at
 *   32 KiHz and can handle delays of up to two seconds. For 32-bit
 *   counters, it runs at the same rate as the clocksource.
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power. Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];
static u32 bmr_cache;

static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };

static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* Restore registers for the channel, RA and RB are not used */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* Disable all the interrupts */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		/* Reenable interrupts that were enabled before suspending */
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* Start the clock if it was used */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* Dual channel, chain channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	/* Finally, trigger all the channels */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
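/*
 * A rating of 200 marks this as a correct, usable clocksource without
 * claiming to be the best the platform could offer; higher-rated
 * sources (e.g. a CPU cycle counter), if present, will be preferred by
 * the clocksource core.
 */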
static struct clocksource clksrc = {
	.rating		= 200,
	.read		= tc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend	= tc_clksrc_suspend,
	.resume		= tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
	return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device clkevt;
	struct clk *clk;
	u32 rate;
	void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then irq and stop */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* set_next_event() configures and starts the timer */
	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	clk_enable(tcd->clk);

	/* count up to RC, then irq and restart */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* Enable clock and interrupts on RC compare */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* go go gadget! */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
	       regs + ATMEL_TC_REG(2, CCR));
	return 0;
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt = {
		.features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating			= 125,
		.set_next_event		= tc_next_event,
		.set_state_shutdown	= tc_shutdown,
		.set_state_periodic	= tc_set_periodic,
		.set_state_oneshot	= tc_set_oneshot,
	},
};
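/*
 * Channel 2 interrupt handler. RC compare (CPCS) is the only interrupt
 * ever enabled on this channel, so any other status bit means the
 * interrupt was not ours. Reading SR also acks the pending flags.
 */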
static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];
	int bits = tc->tcb_config->counter_width;

	/* try to enable t2 clk to avoid future errors in mode change */
	ret = clk_prepare_enable(t2_clk);
	if (ret)
		return ret;

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	if (bits == 32) {
		timer_clock = divisor_idx;
		clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
	} else {
		ret = clk_prepare_enable(tc->slow_clk);
		if (ret) {
			clk_disable_unprepare(t2_clk);
			return ret;
		}

		clkevt.rate = clk_get_rate(tc->slow_clk);
		timer_clock = ATMEL_TC_TIMER_CLOCK5;
	}

	/* keep t2 prepared; the set_state callbacks enable/disable it */
	clk_disable(t2_clk);

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		if (bits != 32)
			clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	/* NOTHING */
	return 0;
}

#endif
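/*
 * 16 bit TC blocks get their 32 bit counter by chaining two channels:
 * channel 0 free-runs on the divided clock and produces one full TIOA0
 * period per 16 bit rollover (TIOA0 is set at count 0 and cleared at
 * 0x8000), while channel 1 counts TIOA0 via XC1 and thus holds the
 * upper 16 bits read back by tc_get_cycles().
 */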
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP	/* free-run */
			| ATMEL_TC_ASWTRG_SET	/* TIOA0 rises at software trigger */
			| ATMEL_TC_ACPA_SET	/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,	/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1: waveform mode, input TIOA0 */
	writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,	/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8 */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,	/* free-run */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct atmel_tcb_config tcb_rm9200_config = {
	.counter_width = 16,
};

static struct atmel_tcb_config tcb_sam9x5_config = {
	.counter_width = 32,
};

static struct atmel_tcb_config tcb_sama5d2_config = {
	.counter_width = 32,
	.has_gclk = 1,
};

static const struct of_device_id atmel_tcb_of_match[] = {
	{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
	{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
	{ /* sentinel */ }
};
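/*
 * Probed via TIMER_OF_DECLARE() below. The "atmel,tcb-timer" node is a
 * child of the TC block node, so registers, clocks and interrupts are
 * looked up on node->parent. The divisor scan keeps the largest divisor
 * that still yields at least 5 MHz; e.g. with a (hypothetical) 132 MHz
 * peripheral clock, /8 gives 16.5 MHz while /32 drops to 4.125 MHz, so
 * /8 would be selected.
 */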
static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* Protect against multiple calls */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	if (!match)
		return -ENODEV;

	tc.tcb_config = match->data;
	bits = tc.tcb_config->counter_width;

	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}

	/* How fast will we be counting? Pick something over 5 MHz. */
	rate = (u32) clk_get_rate(t0_clk);
	i = 0;
	/* the first divisor slot is taken by the gclk input on chips that have one */
	if (tc.tcb_config->has_gclk)
		i = 1;
	for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned divisor = atmel_tcb_divisors[i];
		unsigned tmp;

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
			break;
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
		 ((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* use appropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* setup only channel 0 */
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
	} else {
		/* we have three clocks no matter what the
		 * underlying platform supports.
		 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}
		/* setup both channel 0 & 1 */
		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
		tc_delay_timer.read_current_timer = tc_delay_timer_read;
	}

	/* and away we go! */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2: periodic and oneshot timer support */
	ret = setup_clkevents(&tc, best_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	tc_delay_timer.freq = divided_rate;
	register_current_timer_delay(&tc_delay_timer);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
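/*
 * Sketch of a matching devicetree fragment, assuming an
 * at91sam9x5-style TC block; labels, addresses and interrupt numbers
 * are illustrative only:
 *
 *	tcb0: timer@f8008000 {
 *		compatible = "atmel,at91sam9x5-tcb", "simple-mfd", "syscon";
 *		reg = <0xf8008000 0x100>;
 *		interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
 *		clocks = <&tcb0_clk>, <&clk32k>;
 *		clock-names = "t0_clk", "slow_clk";
 *
 *		timer@0 {
 *			compatible = "atmel,tcb-timer";
 *			reg = <0>, <1>;
 *		};
 *
 *		timer@2 {
 *			compatible = "atmel,tcb-timer";
 *			reg = <2>;
 *		};
 *	};
 */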