// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)
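
/*
 * Clock configuration words (conf/sconf) pack the register location as
 * decoded by the helpers above:
 *   bits [31:20]  register offset (GET_REG_OFFSET)
 *   bits [19:12]  field shift     (GET_SHIFT)
 *   bits [11:8]   field width     (GET_WIDTH)
 * For example, conf = 0x55404400 addresses a 4-bit field at bit 4 of the
 * register at offset 0x554.
 */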

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}
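
/*
 * Mux and divider selector registers pair each settable field in the
 * lower 16 bits with a write-enable bit 16 positions above it
 * (CPG_WEN_BIT). Writing (CPG_WEN_BIT | value) << shift therefore
 * updates a single field without a read-modify-write;
 * rzg2l_cpg_wait_clk_update_done() then polls the matching status field
 * (sconf) until the hardware clears it to confirm completion.
 */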

int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' in the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
	 * and the index-to-value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * The users of this notifier have different constraints:
	 * 1/ the SD div cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI div cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD can have only one parent at 800 MHz and the OCTA div can have
	 * only one parent at 400 MHz, the parent rate was already accounted
	 * for at the beginning of the function (by checking
	 * invalid_rate % new_rate). Now check the hardware divider and update
	 * it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update to complete. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve the divider width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
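
/*
 * The SD muxes below cannot use the generic mux ops: parent switching
 * must go through the write-enable handshake, be confirmed via the
 * status register, and (for SEL_SDHI) detour through the 266 MHz setting
 * when moving between 533 MHz and 400 MHz (see
 * rzg2l_cpg_sd_clk_mux_notifier()).
 */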

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
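
/*
 * The helper above follows
 *   FOUTPOSTDIV = EXTAL / refdiv * (intin + fracin / 2^24) /
 *                 (postdiv1 * postdiv2)
 * where 'rate' is the target VCLK rate. Worked example, assuming a
 * 24 MHz EXTAL and rate = 148500000: intin = 148, fracin = 0.5 * 2^24,
 * refdiv = 2, postdiv1 = postdiv2 = 1, so the PLL is programmed for
 * 12 MHz * 148.5 = 1782 MHz, i.e. 12 times VCLK, matching the division
 * by 2 * 2 * 3 applied by the mux and DSI dividers downstream. (The
 * integer math above truncates the fractional part when reporting the
 * rate.)
 */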

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider
	 * value, calculates the PLL parameters for generating FOUTPOSTDIV
	 * and the clock source for the MUX, and propagates that info to the
	 * parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the clock source for the MUX. It propagates that info to
	 * pll5_4_clk_mux, which sets the clock source for the DSI divider
	 * clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
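
/*
 * VCLK = FOUTPOSTDIV / (2^dsi_div_a * (dsi_div_b + 1)), halved once more
 * when FOUT1PH0 (clksrc = 1) feeds the mux instead of FOUTPOSTDIV. With
 * the defaults programmed at registration time (clksrc = 1, dsi_div_a = 1,
 * dsi_div_b = 2) the total division factor is 12.
 */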

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 * OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |            | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the PLL5 parameters for generating FOUTPOSTDIV. It
	 * propagates that info to sipll5, which sets the parameters for
	 * generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
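
/*
 * SAM PLL output: Fout = Fin * (M + K / 65536) / (P * 2^S), with M, P and
 * S read from the SAMPLL CLK1/CLK2 registers and K a signed 16-bit
 * fractional correction. E.g. a 24 MHz parent with M = 133, K = 0, P = 2,
 * S = 0 yields 1596 MHz.
 */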

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
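
/*
 * RZ/G3S PLL output: Fout = Fin * (NI + NF / 4096) / (M * P), with the
 * dividers decoded from the RZG3S_DIV_* fields of the CLK1 register
 * (P is a power of two, capped at 16).
 */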

static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
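
/*
 * Like the selector registers, each CLK_ON register pairs its enable bits
 * in the lower 16 bits with write-enable bits 16 positions above; the
 * matching CLK_MON register (at offset +0x180) reflects the state the
 * hardware has actually reached.
 */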

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
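
/*
 * Coupled (sibling) clocks share a single ON/MON bit: two mstp_clock
 * instances with the same register offset and bit are linked below at
 * registration time, and the soft 'enabled' state keeps the shared gate
 * on as long as either clock is in use.
 */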

static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
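
/*
 * Reset registers follow the same write-enable layout as CLK_ON, with
 * CLK_MRST (offset +0x180) as the monitor. Devices without monitor
 * registers expose a shared CPG_RST_MON bit or, failing that, are simply
 * given one RCLK cycle (ca. 32 kHz) to settle.
 */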

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
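
/*
 * A module clock qualifies as a PM clock when it is a CPG_MOD clock not
 * listed in info->no_pm_mod_clks[]. Such clocks are attached to the
 * consumer device below and gated automatically across runtime
 * suspend/resume via pm_clk.
 */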

static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}

/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_priv *priv = pd->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}

static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}
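
/*
 * MSTOP registers use the same upper-16-bit write-enable convention:
 * power-on writes only mask << 16 to clear the MSTOP bits (module bus
 * access enabled), while power-off writes mask | (mask << 16) to set
 * them and stop the module bus interface.
 */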

static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Clear MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
{
	struct dev_power_governor *governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	if (always_on) {
		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
		governor = &pm_domain_always_on_gov;
	} else {
		pd->genpd.power_on = rzg2l_cpg_power_on;
		pd->genpd.power_off = rzg2l_cpg_power_off;
		governor = &simple_qos_governor;
	}

	return pm_genpd_init(&pd->genpd, governor, !always_on);
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd, true);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd, always_on);
		if (ret)
			return ret;

		if (always_on) {
			ret = rzg2l_cpg_power_on(&pd->genpd);
			if (ret)
				return ret;
		}

		domains->domains[i] = &pd->genpd;
		/* The parent is the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	return of_genpd_add_provider_onecell(np, &domains->onecell_data);
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	return rzg2l_cpg_reset_controller_register(priv);
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");