// SPDX-License-Identifier: GPL-2.0-only

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/string.h>

#define ADPLL_PLLSS_MMR_LOCK_OFFSET	0x00	/* Managed by MPUPLL */
#define ADPLL_PLLSS_MMR_LOCK_ENABLED	0x1f125B64
#define ADPLL_PLLSS_MMR_UNLOCK_MAGIC	0x1eda4c3d

#define ADPLL_PWRCTRL_OFFSET		0x00
#define ADPLL_PWRCTRL_PONIN		5
#define ADPLL_PWRCTRL_PGOODIN		4
#define ADPLL_PWRCTRL_RET		3
#define ADPLL_PWRCTRL_ISORET		2
#define ADPLL_PWRCTRL_ISOSCAN		1
#define ADPLL_PWRCTRL_OFFMODE		0

#define ADPLL_CLKCTRL_OFFSET		0x04
#define ADPLL_CLKCTRL_CLKDCOLDOEN	29
#define ADPLL_CLKCTRL_IDLE		23
#define ADPLL_CLKCTRL_CLKOUTEN		20
#define ADPLL_CLKINPHIFSEL_ADPLL_S	19	/* REVISIT: which bit? */
#define ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ 19
#define ADPLL_CLKCTRL_ULOWCLKEN		18
#define ADPLL_CLKCTRL_CLKDCOLDOPWDNZ	17
#define ADPLL_CLKCTRL_M2PWDNZ		16
#define ADPLL_CLKCTRL_M3PWDNZ_ADPLL_S	15
#define ADPLL_CLKCTRL_LOWCURRSTDBY_ADPLL_S 13
#define ADPLL_CLKCTRL_LPMODE_ADPLL_S	12
#define ADPLL_CLKCTRL_REGM4XEN_ADPLL_S	10
#define ADPLL_CLKCTRL_SELFREQDCO_ADPLL_LJ 10
#define ADPLL_CLKCTRL_TINITZ		0

#define ADPLL_TENABLE_OFFSET		0x08
#define ADPLL_TENABLEDIV_OFFSET		0x8c

#define ADPLL_M2NDIV_OFFSET		0x10
#define ADPLL_M2NDIV_M2			16
#define ADPLL_M2NDIV_M2_ADPLL_S_WIDTH	5
#define ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH	7

#define ADPLL_MN2DIV_OFFSET		0x14
#define ADPLL_MN2DIV_N2			16

#define ADPLL_FRACDIV_OFFSET		0x18
#define ADPLL_FRACDIV_REGSD		24
#define ADPLL_FRACDIV_FRACTIONALM	0
#define ADPLL_FRACDIV_FRACTIONALM_MASK	0x3ffff

#define ADPLL_BWCTRL_OFFSET		0x1c
#define ADPLL_BWCTRL_BWCONTROL		1
#define ADPLL_BWCTRL_BW_INCR_DECRZ	0

#define ADPLL_RESERVED_OFFSET		0x20

#define ADPLL_STATUS_OFFSET		0x24
#define ADPLL_STATUS_PONOUT		31
#define ADPLL_STATUS_PGOODOUT		30
#define ADPLL_STATUS_LDOPWDN		29
#define ADPLL_STATUS_RECAL_BSTATUS3	28
#define ADPLL_STATUS_RECAL_OPPIN	27
#define ADPLL_STATUS_PHASELOCK		10
#define ADPLL_STATUS_FREQLOCK		9
#define ADPLL_STATUS_BYPASSACK		8
#define ADPLL_STATUS_LOSSREF		6
#define ADPLL_STATUS_CLKOUTENACK	5
#define ADPLL_STATUS_LOCK2		4
#define ADPLL_STATUS_M2CHANGEACK	3
#define ADPLL_STATUS_HIGHJITTER		1
#define ADPLL_STATUS_BYPASS		0
#define ADPLL_STATUS_PREPARED_MASK	(BIT(ADPLL_STATUS_PHASELOCK) | \
					 BIT(ADPLL_STATUS_FREQLOCK))

#define ADPLL_M3DIV_OFFSET		0x28	/* Only on MPUPLL */
#define ADPLL_M3DIV_M3			0
#define ADPLL_M3DIV_M3_WIDTH		5
#define ADPLL_M3DIV_M3_MASK		0x1f

#define ADPLL_RAMPCTRL_OFFSET		0x2c	/* Only on MPUPLL */
#define ADPLL_RAMPCTRL_CLKRAMPLEVEL	19
#define ADPLL_RAMPCTRL_CLKRAMPRATE	16
#define ADPLL_RAMPCTRL_RELOCK_RAMP_EN	0

#define MAX_ADPLL_INPUTS		3
#define MAX_ADPLL_OUTPUTS		4
#define ADPLL_MAX_RETRIES		5

#define to_dco(_hw)	container_of(_hw, struct ti_adpll_dco_data, hw)
#define to_adpll(_hw)	container_of(_hw, struct ti_adpll_data, dco)
#define to_clkout(_hw)	container_of(_hw, struct ti_adpll_clkout_data, hw)

enum ti_adpll_clocks {
	TI_ADPLL_DCO,
	TI_ADPLL_DCO_GATE,
	TI_ADPLL_N2,
	TI_ADPLL_M2,
	TI_ADPLL_M2_GATE,
	TI_ADPLL_BYPASS,
	TI_ADPLL_HIF,
	TI_ADPLL_DIV2,
	TI_ADPLL_CLKOUT,
	TI_ADPLL_CLKOUT2,
	TI_ADPLL_M3,
};

#define TI_ADPLL_NR_CLOCKS	(TI_ADPLL_M3 + 1)

enum ti_adpll_inputs {
	TI_ADPLL_CLKINP,
	TI_ADPLL_CLKINPULOW,
	TI_ADPLL_CLKINPHIF,
};

enum ti_adpll_s_outputs {
	TI_ADPLL_S_DCOCLKLDO,
	TI_ADPLL_S_CLKOUT,
	TI_ADPLL_S_CLKOUTX2,
	TI_ADPLL_S_CLKOUTHIF,
};

enum ti_adpll_lj_outputs {
	TI_ADPLL_LJ_CLKDCOLDO,
	TI_ADPLL_LJ_CLKOUT,
	TI_ADPLL_LJ_CLKOUTLDO,
};

struct ti_adpll_platform_data {
	const bool is_type_s;
	const int nr_max_inputs;
	const int nr_max_outputs;
	const int output_index;
};

struct ti_adpll_clock {
	struct clk *clk;
	struct clk_lookup *cl;
	void (*unregister)(struct clk *clk);
};

struct ti_adpll_dco_data {
	struct clk_hw hw;
};

struct ti_adpll_clkout_data {
	struct ti_adpll_data *adpll;
	struct clk_gate gate;
	struct clk_hw hw;
};

struct ti_adpll_data {
	struct device *dev;
	const struct ti_adpll_platform_data *c;
	struct device_node *np;
	unsigned long pa;
	void __iomem *iobase;
	void __iomem *regs;
	spinlock_t lock;	/* For ADPLL shared register access */
	const char *parent_names[MAX_ADPLL_INPUTS];
	struct clk *parent_clocks[MAX_ADPLL_INPUTS];
	struct ti_adpll_clock *clocks;
	struct clk_onecell_data outputs;
	struct ti_adpll_dco_data dco;
};
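/*
 * Clock naming: clocks exported via "clock-output-names" keep the names
 * given in the device tree, while purely internal clocks are named after
 * the ADPLL instance physical address as "%08lx.adpll.<postfix>".
 */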
static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
					 int output_index,
					 const char *postfix)
{
	const char *name;
	int err;

	if (output_index >= 0) {
		err = of_property_read_string_index(d->np,
						    "clock-output-names",
						    output_index,
						    &name);
		if (err)
			return NULL;
	} else {
		name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
				      d->pa, postfix);
	}

	return name;
}

#define ADPLL_MAX_CON_ID	16	/* See MAX_CON_ID */

static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock,
				int index, int output_index, const char *name,
				void (*unregister)(struct clk *clk))
{
	struct clk_lookup *cl;
	const char *postfix = NULL;
	char con_id[ADPLL_MAX_CON_ID];

	d->clocks[index].clk = clock;
	d->clocks[index].unregister = unregister;

	/* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */
	postfix = strrchr(name, '.');
	if (postfix && strlen(postfix) > 1) {
		if (strlen(postfix) > ADPLL_MAX_CON_ID)
			dev_warn(d->dev, "clock %s con_id lookup may fail\n",
				 name);
		snprintf(con_id, 16, "pll%03lx%s", d->pa & 0xfff, postfix + 1);
		cl = clkdev_create(clock, con_id, NULL);
		if (!cl)
			return -ENOMEM;
		d->clocks[index].cl = cl;
	} else {
		dev_warn(d->dev, "no con_id for clock %s\n", name);
	}

	if (output_index < 0)
		return 0;

	d->outputs.clks[output_index] = clock;
	d->outputs.clk_num++;

	return 0;
}
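/*
 * The ti_adpll_init_*() helpers below register the basic clk types making
 * up the ADPLL clock tree. The divider, mux and gate variants operate
 * directly on ADPLL register bitfields and share d->lock, while the fixed
 * factor variant models the fixed internal /2 stage between M2 and clkout
 * on type S PLLs.
 */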
static int ti_adpll_init_divider(struct ti_adpll_data *d,
				 enum ti_adpll_clocks index,
				 int output_index, char *name,
				 struct clk *parent_clock,
				 void __iomem *reg,
				 u8 shift, u8 width,
				 u8 clk_divider_flags)
{
	const char *child_name;
	const char *parent_name;
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, output_index, name);
	if (!child_name)
		return -EINVAL;

	parent_name = __clk_get_name(parent_clock);
	clock = clk_register_divider(d->dev, child_name, parent_name, 0,
				     reg, shift, width, clk_divider_flags,
				     &d->lock);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register divider %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    clk_unregister_divider);
}

static int ti_adpll_init_mux(struct ti_adpll_data *d,
			     enum ti_adpll_clocks index,
			     char *name, struct clk *clk0,
			     struct clk *clk1,
			     void __iomem *reg,
			     u8 shift)
{
	const char *child_name;
	const char *parents[2];
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
	if (!child_name)
		return -ENOMEM;
	parents[0] = __clk_get_name(clk0);
	parents[1] = __clk_get_name(clk1);
	clock = clk_register_mux(d->dev, child_name, parents, 2, 0,
				 reg, shift, 1, 0, &d->lock);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register mux %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
				    clk_unregister_mux);
}

static int ti_adpll_init_gate(struct ti_adpll_data *d,
			      enum ti_adpll_clocks index,
			      int output_index, char *name,
			      struct clk *parent_clock,
			      void __iomem *reg,
			      u8 bit_idx,
			      u8 clk_gate_flags)
{
	const char *child_name;
	const char *parent_name;
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, output_index, name);
	if (!child_name)
		return -EINVAL;

	parent_name = __clk_get_name(parent_clock);
	clock = clk_register_gate(d->dev, child_name, parent_name, 0,
				  reg, bit_idx, clk_gate_flags,
				  &d->lock);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register gate %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    clk_unregister_gate);
}

static int ti_adpll_init_fixed_factor(struct ti_adpll_data *d,
				      enum ti_adpll_clocks index,
				      char *name,
				      struct clk *parent_clock,
				      unsigned int mult,
				      unsigned int div)
{
	const char *child_name;
	const char *parent_name;
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
	if (!child_name)
		return -ENOMEM;

	parent_name = __clk_get_name(parent_clock);
	clock = clk_register_fixed_factor(d->dev, child_name, parent_name,
					  0, mult, div);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
				    clk_unregister);
}

static void ti_adpll_set_idle_bypass(struct ti_adpll_data *d)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&d->lock, flags);
	v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
	v |= BIT(ADPLL_CLKCTRL_IDLE);
	writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
	spin_unlock_irqrestore(&d->lock, flags);
}

static void ti_adpll_clear_idle_bypass(struct ti_adpll_data *d)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&d->lock, flags);
	v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
	v &= ~BIT(ADPLL_CLKCTRL_IDLE);
	writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
	spin_unlock_irqrestore(&d->lock, flags);
}

static bool ti_adpll_clock_is_bypass(struct ti_adpll_data *d)
{
	u32 v;

	v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);

	return v & BIT(ADPLL_STATUS_BYPASS);
}

/*
 * Locked and bypass are not actually mutually exclusive: if you only care
 * about the DCO clock and not CLKOUT you can clear M2PWDNZ before enabling
 * the PLL, resulting in status (FREQLOCK | PHASELOCK | BYPASS) after lock.
 */
static bool ti_adpll_is_locked(struct ti_adpll_data *d)
{
	u32 v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);

	return (v & ADPLL_STATUS_PREPARED_MASK) == ADPLL_STATUS_PREPARED_MASK;
}

static int ti_adpll_wait_lock(struct ti_adpll_data *d)
{
	int retries = ADPLL_MAX_RETRIES;

	do {
		if (ti_adpll_is_locked(d))
			return 0;
		usleep_range(200, 300);
	} while (retries--);

	dev_err(d->dev, "pll failed to lock\n");
	return -ETIMEDOUT;
}
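/*
 * Prepare clears the CLKCTRL IDLE (bypass) bit and then polls for both
 * FREQLOCK and PHASELOCK; unprepare puts the PLL back into idle bypass.
 * The return value of ti_adpll_wait_lock() is not propagated, so a lock
 * timeout only logs an error.
 */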
static int ti_adpll_prepare(struct clk_hw *hw)
{
	struct ti_adpll_dco_data *dco = to_dco(hw);
	struct ti_adpll_data *d = to_adpll(dco);

	ti_adpll_clear_idle_bypass(d);
	ti_adpll_wait_lock(d);

	return 0;
}

static void ti_adpll_unprepare(struct clk_hw *hw)
{
	struct ti_adpll_dco_data *dco = to_dco(hw);
	struct ti_adpll_data *d = to_adpll(dco);

	ti_adpll_set_idle_bypass(d);
}

static int ti_adpll_is_prepared(struct clk_hw *hw)
{
	struct ti_adpll_dco_data *dco = to_dco(hw);
	struct ti_adpll_data *d = to_adpll(dco);

	return ti_adpll_is_locked(d);
}

/*
 * Note that the DCO clock is never subject to bypass: if the PLL is off,
 * dcoclk is low.
 */
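/*
 * The register reads below compute, with M the low 16 bits of MN2DIV,
 * N the low 16 bits of M2NDIV and frac_m the 18-bit fractional multiplier:
 *
 *	rate = parent_rate * (M + frac_m / 2^18) / (N + 1)
 *
 * with an additional *2 (and *4 when REGM4XEN is set) for type S PLLs.
 */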
static unsigned long ti_adpll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct ti_adpll_dco_data *dco = to_dco(hw);
	struct ti_adpll_data *d = to_adpll(dco);
	u32 frac_m, divider, v;
	u64 rate;
	unsigned long flags;

	if (ti_adpll_clock_is_bypass(d))
		return 0;

	spin_lock_irqsave(&d->lock, flags);
	frac_m = readl_relaxed(d->regs + ADPLL_FRACDIV_OFFSET);
	frac_m &= ADPLL_FRACDIV_FRACTIONALM_MASK;
	rate = (u64)readw_relaxed(d->regs + ADPLL_MN2DIV_OFFSET) << 18;
	rate += frac_m;
	rate *= parent_rate;
	divider = (readw_relaxed(d->regs + ADPLL_M2NDIV_OFFSET) + 1) << 18;
	spin_unlock_irqrestore(&d->lock, flags);

	do_div(rate, divider);

	if (d->c->is_type_s) {
		v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
		if (v & BIT(ADPLL_CLKCTRL_REGM4XEN_ADPLL_S))
			rate *= 4;
		rate *= 2;
	}

	return rate;
}

/* PLL parent is always clkinp, bypass only affects the children */
static u8 ti_adpll_get_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops ti_adpll_ops = {
	.prepare = ti_adpll_prepare,
	.unprepare = ti_adpll_unprepare,
	.is_prepared = ti_adpll_is_prepared,
	.recalc_rate = ti_adpll_recalc_rate,
	.get_parent = ti_adpll_get_parent,
};

static int ti_adpll_init_dco(struct ti_adpll_data *d)
{
	struct clk_init_data init;
	struct clk *clock;
	const char *postfix;
	int width, err;

	d->outputs.clks = devm_kcalloc(d->dev,
				       MAX_ADPLL_OUTPUTS,
				       sizeof(struct clk *),
				       GFP_KERNEL);
	if (!d->outputs.clks)
		return -ENOMEM;

	if (d->c->output_index < 0)
		postfix = "dco";
	else
		postfix = NULL;

	init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix);
	if (!init.name)
		return -EINVAL;

	init.parent_names = d->parent_names;
	init.num_parents = d->c->nr_max_inputs;
	init.ops = &ti_adpll_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	d->dco.hw.init = &init;

	if (d->c->is_type_s)
		width = 5;
	else
		width = 4;

	/* Internal input clock divider N2 */
	err = ti_adpll_init_divider(d, TI_ADPLL_N2, -ENODEV, "n2",
				    d->parent_clocks[TI_ADPLL_CLKINP],
				    d->regs + ADPLL_MN2DIV_OFFSET,
				    ADPLL_MN2DIV_N2, width, 0);
	if (err)
		return err;

	clock = devm_clk_register(d->dev, &d->dco.hw);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO, d->c->output_index,
				    init.name, NULL);
}
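/*
 * The clkout clocks are a combined mux and gate: the parent is selected by
 * the PLL bypass status bit rather than a mux register field, and the
 * optional gate reuses clk_gate_ops through an embedded struct clk_gate
 * via __clk_hw_set_clk().
 */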
static int ti_adpll_clkout_enable(struct clk_hw *hw)
{
	struct ti_adpll_clkout_data *co = to_clkout(hw);
	struct clk_hw *gate_hw = &co->gate.hw;

	__clk_hw_set_clk(gate_hw, hw);

	return clk_gate_ops.enable(gate_hw);
}

static void ti_adpll_clkout_disable(struct clk_hw *hw)
{
	struct ti_adpll_clkout_data *co = to_clkout(hw);
	struct clk_hw *gate_hw = &co->gate.hw;

	__clk_hw_set_clk(gate_hw, hw);
	clk_gate_ops.disable(gate_hw);
}

static int ti_adpll_clkout_is_enabled(struct clk_hw *hw)
{
	struct ti_adpll_clkout_data *co = to_clkout(hw);
	struct clk_hw *gate_hw = &co->gate.hw;

	__clk_hw_set_clk(gate_hw, hw);

	return clk_gate_ops.is_enabled(gate_hw);
}

/* Setting PLL bypass puts clkout and clkoutx2 into bypass */
static u8 ti_adpll_clkout_get_parent(struct clk_hw *hw)
{
	struct ti_adpll_clkout_data *co = to_clkout(hw);
	struct ti_adpll_data *d = co->adpll;

	return ti_adpll_clock_is_bypass(d);
}

static int ti_adpll_init_clkout(struct ti_adpll_data *d,
				enum ti_adpll_clocks index,
				int output_index, int gate_bit,
				char *name, struct clk *clk0,
				struct clk *clk1)
{
	struct ti_adpll_clkout_data *co;
	struct clk_init_data init;
	struct clk_ops *ops;
	const char *parent_names[2];
	const char *child_name;
	struct clk *clock;
	int err;

	co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
	if (!co)
		return -ENOMEM;
	co->adpll = d;

	err = of_property_read_string_index(d->np,
					    "clock-output-names",
					    output_index,
					    &child_name);
	if (err)
		return err;

	ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	init.name = child_name;
	init.ops = ops;
	init.flags = 0;
	co->hw.init = &init;
	parent_names[0] = __clk_get_name(clk0);
	parent_names[1] = __clk_get_name(clk1);
	init.parent_names = parent_names;
	init.num_parents = 2;

	ops->get_parent = ti_adpll_clkout_get_parent;
	ops->determine_rate = __clk_mux_determine_rate;
	if (gate_bit) {
		co->gate.lock = &d->lock;
		co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
		co->gate.bit_idx = gate_bit;
		ops->enable = ti_adpll_clkout_enable;
		ops->disable = ti_adpll_clkout_disable;
		ops->is_enabled = ti_adpll_clkout_is_enabled;
	}

	clock = devm_clk_register(d->dev, &co->hw);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register output %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    NULL);
}

static int ti_adpll_init_children_adpll_s(struct ti_adpll_data *d)
{
	int err;

	if (!d->c->is_type_s)
		return 0;

	/* Internal mux, sources from divider N2 or clkinpulow */
	err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
				d->clocks[TI_ADPLL_N2].clk,
				d->parent_clocks[TI_ADPLL_CLKINPULOW],
				d->regs + ADPLL_CLKCTRL_OFFSET,
				ADPLL_CLKCTRL_ULOWCLKEN);
	if (err)
		return err;

	/* Internal divider M2, sources DCO */
	err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV, "m2",
				    d->clocks[TI_ADPLL_DCO].clk,
				    d->regs + ADPLL_M2NDIV_OFFSET,
				    ADPLL_M2NDIV_M2,
				    ADPLL_M2NDIV_M2_ADPLL_S_WIDTH,
				    CLK_DIVIDER_ONE_BASED);
	if (err)
		return err;

	/* Internal fixed divider, after M2 before clkout */
	err = ti_adpll_init_fixed_factor(d, TI_ADPLL_DIV2, "div2",
					 d->clocks[TI_ADPLL_M2].clk,
					 1, 2);
	if (err)
		return err;

	/* Output clkout with a mux and gate, sources from div2 or bypass */
	err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
				   ADPLL_CLKCTRL_CLKOUTEN, "clkout",
				   d->clocks[TI_ADPLL_DIV2].clk,
				   d->clocks[TI_ADPLL_BYPASS].clk);
	if (err)
		return err;

	/* Output clkoutx2 with a mux and gate, sources from M2 or bypass */
	err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT2, TI_ADPLL_S_CLKOUTX2, 0,
				   "clkout2", d->clocks[TI_ADPLL_M2].clk,
				   d->clocks[TI_ADPLL_BYPASS].clk);
	if (err)
		return err;

	/* Internal mux, sources from DCO and clkinphif */
	if (d->parent_clocks[TI_ADPLL_CLKINPHIF]) {
		err = ti_adpll_init_mux(d, TI_ADPLL_HIF, "hif",
					d->clocks[TI_ADPLL_DCO].clk,
					d->parent_clocks[TI_ADPLL_CLKINPHIF],
					d->regs + ADPLL_CLKCTRL_OFFSET,
					ADPLL_CLKINPHIFSEL_ADPLL_S);
		if (err)
			return err;
	}

	/* Output clkouthif with a divider M3, sources from hif */
	err = ti_adpll_init_divider(d, TI_ADPLL_M3, TI_ADPLL_S_CLKOUTHIF, "m3",
				    d->clocks[TI_ADPLL_HIF].clk,
				    d->regs + ADPLL_M3DIV_OFFSET,
				    ADPLL_M3DIV_M3,
				    ADPLL_M3DIV_M3_WIDTH,
				    CLK_DIVIDER_ONE_BASED);
	if (err)
		return err;

	/* Output clock dcoclkldo is the DCO */

	return 0;
}

static int ti_adpll_init_children_adpll_lj(struct ti_adpll_data *d)
{
	int err;

	if (d->c->is_type_s)
		return 0;

	/* Output clkdcoldo, gated output of DCO */
	err = ti_adpll_init_gate(d, TI_ADPLL_DCO_GATE, TI_ADPLL_LJ_CLKDCOLDO,
				 "clkdcoldo", d->clocks[TI_ADPLL_DCO].clk,
				 d->regs + ADPLL_CLKCTRL_OFFSET,
				 ADPLL_CLKCTRL_CLKDCOLDOEN, 0);
	if (err)
		return err;

	/* Internal divider M2, sources from DCO */
	err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV,
				    "m2", d->clocks[TI_ADPLL_DCO].clk,
				    d->regs + ADPLL_M2NDIV_OFFSET,
				    ADPLL_M2NDIV_M2,
				    ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH,
				    CLK_DIVIDER_ONE_BASED);
	if (err)
		return err;

	/* Output clkoutldo, gated output of M2 */
	err = ti_adpll_init_gate(d, TI_ADPLL_M2_GATE, TI_ADPLL_LJ_CLKOUTLDO,
				 "clkoutldo", d->clocks[TI_ADPLL_M2].clk,
				 d->regs + ADPLL_CLKCTRL_OFFSET,
				 ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ,
				 0);
	if (err)
		return err;

	/* Internal mux, sources from divider N2 or clkinpulow */
	err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
				d->clocks[TI_ADPLL_N2].clk,
				d->parent_clocks[TI_ADPLL_CLKINPULOW],
				d->regs + ADPLL_CLKCTRL_OFFSET,
				ADPLL_CLKCTRL_ULOWCLKEN);
	if (err)
		return err;

	/* Output clkout, sources M2 or bypass */
	err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
				   ADPLL_CLKCTRL_CLKOUTEN, "clkout",
				   d->clocks[TI_ADPLL_M2].clk,
				   d->clocks[TI_ADPLL_BYPASS].clk);
	if (err)
		return err;

	return 0;
}

static void ti_adpll_free_resources(struct ti_adpll_data *d)
{
	int i;

	for (i = TI_ADPLL_M3; i >= 0; i--) {
		struct ti_adpll_clock *ac = &d->clocks[i];

		if (!ac || IS_ERR_OR_NULL(ac->clk))
			continue;
		if (ac->cl)
			clkdev_drop(ac->cl);
		if (ac->unregister)
			ac->unregister(ac->clk);
	}
}

/* MPU PLL manages the lock register for all PLLs */
static void ti_adpll_unlock_all(void __iomem *reg)
{
	u32 v;

	v = readl_relaxed(reg);
	if (v == ADPLL_PLLSS_MMR_LOCK_ENABLED)
		writel_relaxed(ADPLL_PLLSS_MMR_UNLOCK_MAGIC, reg);
}
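/*
 * On type S PLLs the first 8 bytes of the mapped region hold the PLLSS MMR
 * lock registers, so the ADPLL registers proper start at offset 8; type LJ
 * PLLs start at offset 0.
 */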
static int ti_adpll_init_registers(struct ti_adpll_data *d)
{
	int register_offset = 0;

	if (d->c->is_type_s) {
		register_offset = 8;
		ti_adpll_unlock_all(d->iobase + ADPLL_PLLSS_MMR_LOCK_OFFSET);
	}

	d->regs = d->iobase + register_offset + ADPLL_PWRCTRL_OFFSET;

	return 0;
}

static int ti_adpll_init_inputs(struct ti_adpll_data *d)
{
	static const char error[] = "need at least %i inputs";
	struct clk *clock;
	int nr_inputs;

	nr_inputs = of_clk_get_parent_count(d->np);
	if (nr_inputs < d->c->nr_max_inputs) {
		dev_err(d->dev, error, d->c->nr_max_inputs);
		return -EINVAL;
	}
	of_clk_parent_fill(d->np, d->parent_names, nr_inputs);

	clock = devm_clk_get(d->dev, d->parent_names[0]);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "could not get clkinp\n");
		return PTR_ERR(clock);
	}
	d->parent_clocks[TI_ADPLL_CLKINP] = clock;

	clock = devm_clk_get(d->dev, d->parent_names[1]);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "could not get clkinpulow clock\n");
		return PTR_ERR(clock);
	}
	d->parent_clocks[TI_ADPLL_CLKINPULOW] = clock;

	if (d->c->is_type_s) {
		clock = devm_clk_get(d->dev, d->parent_names[2]);
		if (IS_ERR(clock)) {
			dev_err(d->dev, "could not get clkinphif clock\n");
			return PTR_ERR(clock);
		}
		d->parent_clocks[TI_ADPLL_CLKINPHIF] = clock;
	}

	return 0;
}

static const struct ti_adpll_platform_data ti_adpll_type_s = {
	.is_type_s = true,
	.nr_max_inputs = MAX_ADPLL_INPUTS,
	.nr_max_outputs = MAX_ADPLL_OUTPUTS,
	.output_index = TI_ADPLL_S_DCOCLKLDO,
};

static const struct ti_adpll_platform_data ti_adpll_type_lj = {
	.is_type_s = false,
	.nr_max_inputs = MAX_ADPLL_INPUTS - 1,
	.nr_max_outputs = MAX_ADPLL_OUTPUTS - 1,
	.output_index = -EINVAL,
};
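/*
 * Illustrative device tree node, only a sketch based on the compatible
 * strings and the input/output ordering used by this driver; the address,
 * label and clock phandles here are made up, see the dt-binding for the
 * authoritative example:
 *
 *	adpll_mpu_ck: adpll@40 {
 *		#clock-cells = <1>;
 *		compatible = "ti,dm814-adpll-s-clock";
 *		reg = <0x40 0x40>;
 *		clocks = <&devosc_ck &devosc_ck &devosc_ck>;
 *		clock-names = "clkinp", "clkinpulow", "clkinphif";
 *		clock-output-names = "481c5040.adpll.dcoclkldo",
 *				     "481c5040.adpll.clkout",
 *				     "481c5040.adpll.clkoutx2",
 *				     "481c5040.adpll.clkouthif";
 *	};
 */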
static const struct of_device_id ti_adpll_match[] = {
	{ .compatible = "ti,dm814-adpll-s-clock", &ti_adpll_type_s },
	{ .compatible = "ti,dm814-adpll-lj-clock", &ti_adpll_type_lj },
	{},
};
MODULE_DEVICE_TABLE(of, ti_adpll_match);

static int ti_adpll_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct ti_adpll_data *d;
	struct resource *res;
	int err;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	d->dev = dev;
	d->np = node;
	d->c = device_get_match_data(dev);
	dev_set_drvdata(d->dev, d);
	spin_lock_init(&d->lock);

	d->iobase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(d->iobase))
		return PTR_ERR(d->iobase);
	d->pa = res->start;

	err = ti_adpll_init_registers(d);
	if (err)
		return err;

	err = ti_adpll_init_inputs(d);
	if (err)
		return err;

	d->clocks = devm_kcalloc(d->dev,
				 TI_ADPLL_NR_CLOCKS,
				 sizeof(struct ti_adpll_clock),
				 GFP_KERNEL);
	if (!d->clocks)
		return -ENOMEM;

	err = ti_adpll_init_dco(d);
	if (err) {
		dev_err(dev, "could not register dco: %i\n", err);
		goto free;
	}

	err = ti_adpll_init_children_adpll_s(d);
	if (err)
		goto free;
	err = ti_adpll_init_children_adpll_lj(d);
	if (err)
		goto free;

	err = of_clk_add_provider(d->np, of_clk_src_onecell_get, &d->outputs);
	if (err)
		goto free;

	return 0;

free:
	WARN_ON(1);
	ti_adpll_free_resources(d);

	return err;
}

static void ti_adpll_remove(struct platform_device *pdev)
{
	struct ti_adpll_data *d = dev_get_drvdata(&pdev->dev);

	ti_adpll_free_resources(d);
}

static struct platform_driver ti_adpll_driver = {
	.driver = {
		.name = "ti-adpll",
		.of_match_table = ti_adpll_match,
	},
	.probe = ti_adpll_probe,
	.remove_new = ti_adpll_remove,
};

static int __init ti_adpll_init(void)
{
	return platform_driver_register(&ti_adpll_driver);
}
core_initcall(ti_adpll_init);

static void __exit ti_adpll_exit(void)
{
	platform_driver_unregister(&ti_adpll_driver);
}
module_exit(ti_adpll_exit);

MODULE_DESCRIPTION("Clock driver for dm814x ADPLL");
MODULE_ALIAS("platform:dm814-adpll-clock");
MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
MODULE_LICENSE("GPL v2");