/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[4];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be >= the platform PLL;
 * otherwise it must be >= platform PLL / 2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX];	/* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info;	/* mutable copy */
	struct clk *sysclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx;
	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	pct80_rate = clk_get_rate(div->clk);
	pct80_rate *= 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/* Legacy nodes may get probed before the parent clockgen node */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk *sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
}

static struct clk *sysclk_from_parent(const char *name)
{
	struct clk *clk;
	const char *parent_name;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	/* Register the input clock under the desired name. */
	parent_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, parent_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = sysclk_from_parent(name);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input clock\n", __func__);
	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL	BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Read the PLL multiplier */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %s\n", __func__,
		       np->full_name);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %s regs\n", __func__,
				       guts->full_name);
			}
		}
	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
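/*
 * Illustrative consumer usage (a sketch, not part of this driver): the
 * two-cell clock specifier decoded by clockgen_clk_get() above is
 * <type index>, where type 0 = sysclk, 1 = cmux, 2 = hwaccel, 3 = fman,
 * and 4 = platform PLL divider.  A hypothetical device-tree consumer
 * selecting core mux 0 might therefore reference this provider as:
 *
 *	some-device@ea000 {
 *		clocks = <&clockgen 1 0>;
 *	};
 *
 * The node name and unit address here are assumptions for illustration only;
 * the specifier semantics follow directly from clockgen_clk_get().
 */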