// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay (shane@minirl.com)
// Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
// Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
 *  - MX1/MXL
 *  - MX21, MX27.
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */
enum imx_gpt_type {
	GPT_TYPE_IMX1,		/* i.MX1 */
	GPT_TYPE_IMX21,		/* i.MX21/27 */
	GPT_TYPE_IMX31,		/* i.MX31/35/25/37/51/6Q */
	GPT_TYPE_IMX6DL,	/* i.MX6DL/SX/SL */
};

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
		       imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
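
/*
 * Editorial note: the free-running counter (the reg_tcn register selected
 * per SoC below) is shared by three readers: the mmio clocksource, the
 * kernel's sched_clock(), and - on ARM - the register_current_timer_delay()
 * based udelay() implementation.  sched_clock() and the delay timer both
 * read it through sched_clock_reg, which mxc_clocksource_init() points at
 * the counter register.
 */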
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}
#endif

static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);
#endif

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
				     clocksource_mmio_readl_up);
}

/* clock event */

static int mx1_2_set_next_event(unsigned long evt,
				struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
			     struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}
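
/*
 * Editorial note on the two set_next_event() helpers above: the compare
 * register is armed relative to the current counter value and the counter
 * is then read back.  If the requested expiry already lies in the past
 * (the signed difference tcmp - TCN went negative), the helper returns
 * -ETIME so the clockevents core knows the event was not programmed.  The
 * evt < 0x7fffffff guard on the v2 path keeps the programmed delta within
 * half of the 32-bit counter range, which is what keeps the signed
 * comparison meaningful.
 */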

static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tcn;

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put the overhead of interrupt enable/disable into
	 * mxc_set_next_event(); the core has about 4 minutes to call
	 * mxc_set_next_event() or shut down the clock after the mode
	 * switch.
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);

	readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	return request_irq(imxtm->irq, mxc_timer_interrupt,
			   IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}

static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}
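
/*
 * Editorial note: mxc_timer_init_dt() below expects the usual GPT device
 * tree node: one register window, one interrupt, an "ipg" clock and either
 * an "osc_per" or a "per" clock.  A rough, illustrative sketch only - the
 * address, interrupt number and clock phandles are examples and differ per
 * SoC:
 *
 *	gpt: timer@2098000 {
 *		compatible = "fsl,imx6q-gpt", "fsl,imx31-gpt";
 *		reg = <0x02098000 0x4000>;
 *		interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks IMX6QDL_CLK_GPT_IPG>,
 *			 <&clks IMX6QDL_CLK_GPT_IPG_PER>;
 *		clock-names = "ipg", "per";
 *	};
 */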

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base) {
		ret = -ENXIO;
		goto err_kfree;
	}

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0) {
		ret = -EINVAL;
		goto err_kfree;
	}

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		goto err_kfree;

	initialized = 1;

	return 0;

err_kfree:
	kfree(imxtm);
	return ret;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * We were using the same compatible string for the i.MX6Q/D and
	 * i.MX6DL/S GPT devices, while they actually have different
	 * programming models.  This is a workaround to keep existing
	 * i.MX6DL/S DTBs working with the new kernel.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);