// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/string_choices.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

static const u16 mstpsr_for_gen4[] = {
	0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
	0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
	0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
	0x2E60, 0x2E64, 0x2E68, 0x2E6C, 0x2E70, 0x2E74,
};

/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

static const u16 mstpcr_for_gen4[] = {
	0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
	0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
	0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
	0x2D60, 0x2D64, 0x2D68, 0x2D6C, 0x2D70, 0x2D74,
};

/*
 * Module Stop Control Register offsets (RZ/T2H)
 * RZ/T2H has two register blocks; bit 12 of the stored value is used to
 * differentiate between them.
 */

#define RZT2H_MSTPCR_BLOCK_SHIFT	12
#define RZT2H_MSTPCR_OFFSET_MASK	GENMASK(11, 0)
#define RZT2H_MSTPCR(block, offset)	(((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
					 ((offset) & RZT2H_MSTPCR_OFFSET_MASK))

#define RZT2H_MSTPCR_BLOCK(x)	((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
#define RZT2H_MSTPCR_OFFSET(x)	((x) & RZT2H_MSTPCR_OFFSET_MASK)

static const u16 mstpcr_for_rzt2h[] = {
	RZT2H_MSTPCR(0, 0x300),	/* MSTPCRA */
	RZT2H_MSTPCR(0, 0x304),	/* MSTPCRB */
	RZT2H_MSTPCR(0, 0x308),	/* MSTPCRC */
	RZT2H_MSTPCR(0, 0x30c),	/* MSTPCRD */
	RZT2H_MSTPCR(0, 0x310),	/* MSTPCRE */
	0,
	RZT2H_MSTPCR(1, 0x318),	/* MSTPCRG */
	0,
	RZT2H_MSTPCR(1, 0x320),	/* MSTPCRI */
	RZT2H_MSTPCR(0, 0x324),	/* MSTPCRJ */
	RZT2H_MSTPCR(0, 0x328),	/* MSTPCRK */
	RZT2H_MSTPCR(0, 0x32c),	/* MSTPCRL */
	RZT2H_MSTPCR(0, 0x330),	/* MSTPCRM */
	RZT2H_MSTPCR(1, 0x334),	/* MSTPCRN */
};
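
/*
 * Worked example of the encoding above (derived purely from the macros, for
 * illustration only): MSTPCRG lives in the second register block at offset
 * 0x318, so RZT2H_MSTPCR(1, 0x318) evaluates to (1 << 12) | 0x318 = 0x1318.
 * The accessors then recover both halves: RZT2H_MSTPCR_BLOCK(0x1318) == 1
 * selects base1, and RZT2H_MSTPCR_OFFSET(0x1318) == 0x318 is the register
 * offset within that block.
 */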

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

static const u16 srcr_for_gen4[] = {
	0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
	0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
	0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
	0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};

/*
 * Software Reset Clearing Register offsets
 */

static const u16 srstclr[] = {
	0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
	0x960, 0x964, 0x968, 0x96C,
};

static const u16 srstclr_for_gen4[] = {
	0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
	0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
	0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
	0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC, 0x2CF0, 0x2CF4,
};

/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
 *                        and Software Reset Private Data
 *
 * @pub: Data passed to clock registration callback
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @reg_layout: CPG/MSSR register layout
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @status_regs: Pointer to status registers array
 * @control_regs: Pointer to control registers array
 * @reset_regs: Pointer to reset registers array
 * @reset_clear_regs: Pointer to reset clearing registers array
 * @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
 *                 [].val: Saved values of SMSTPCR[]
 * @reserved_ids: List of module clock IDs reserved for non-Linux users
 *                (only used temporarily during initialization)
 * @num_reserved_ids: Number of entries in reserved_ids[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
	struct cpg_mssr_pub pub;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	enum clk_reg_layout reg_layout;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;

	const u16 *status_regs;
	const u16 *control_regs;
	const u16 *reset_regs;
	const u16 *reset_clear_regs;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];

	unsigned int *reserved_ids;
	unsigned int num_reserved_ids;

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
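
/*
 * Note on indexing (derived from the helpers below, for illustration only):
 * mstp_clock.index is the packed position of the gate bit, i.e.
 * index = register * 32 + bit.  A module clock controlled by bit 2 of the
 * third control register (register 2) therefore has index 2 * 32 + 2 = 66,
 * and the enable/disable helpers recover the pair as reg = index / 32 and
 * bit = index % 32.
 */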

static u32 cpg_rzt2h_mstp_read(struct clk_hw *hw, u16 offset)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	void __iomem *base =
		RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;

	return readl(base + RZT2H_MSTPCR_OFFSET(offset));
}

static void cpg_rzt2h_mstp_write(struct clk_hw *hw, u16 offset, u32 value)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	void __iomem *base =
		RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;

	writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
}

static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	u32 value;
	int error;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		str_on_off(enable));
	spin_lock_irqsave(&priv->pub.rmw_lock, flags);

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		value = readb(priv->pub.base0 + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->pub.base0 + priv->control_regs[reg]);

		/* dummy read to ensure write has completed */
		readb(priv->pub.base0 + priv->control_regs[reg]);
		barrier_data(priv->pub.base0 + priv->control_regs[reg]);
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		value = cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);

		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;

		cpg_rzt2h_mstp_write(hw, priv->control_regs[reg], value);
	} else {
		value = readl(priv->pub.base0 + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->pub.base0 + priv->control_regs[reg]);
	}

	spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);

	if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
	    priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
		return 0;

	error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
					  value, !(value & bitmask), 0, 10);
	if (error)
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->pub.base0 + priv->control_regs[reg], bit);

	return error;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	u32 value;

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		value = readb(priv->pub.base0 + priv->control_regs[reg]);
	else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
		value = cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
	else
		value = readl(priv->pub.base0 + priv->status_regs[reg]);

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};
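
/*
 * Summary of the gate semantics implemented above (descriptive only): a set
 * bit in the control register stops the module, a cleared bit lets its clock
 * run, so "enable" clears the bit and "disable" sets it.  On the layouts
 * that provide separate status registers (R-Car Gen2/Gen3 and Gen4),
 * enabling additionally polls the status register until the corresponding
 * bit clears; no status register array is set up for RZ/A and RZ/T2H, so
 * the poll is skipped for those layouts.
 */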

static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
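
/*
 * Illustration of the two-cell specifier handled above (example values only,
 * not tied to any particular SoC):
 *
 *	clocks = <&cpg CPG_MOD 202>, <&cpg CPG_CORE 8>;
 *
 * CPG_CORE uses the core clock index directly, while CPG_MOD encodes
 * "control register * 100 + bit", so 202 means register 2, bit 2 and is
 * packed to the flat module clock index 2 * 32 + 2 = 66 used internally.
 * RZ/A instead uses a "register * 10 + bit" encoding with only bits 0-7
 * valid, handled by MOD_CLK_PACK_10().
 */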

static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->pub.clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->pub.base0 + core->offset,
						&priv->pub.notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     &priv->pub);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->pub.clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init = {};
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->pub.clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i] &&
		    cpg_mstp_clock_is_enabled(&clock->hw)) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	/*
	 * Ignore reserved devices; see cpg_mssr_reserved_init().
	 */
	for (i = 0; i < priv->num_reserved_ids; i++) {
		if (id == priv->reserved_ids[i]) {
			dev_info(dev, "Ignore Linux non-assigned mod (%s)\n", mod->name);
			init.flags |= CLK_IGNORE_UNUSED;
			break;
		}
	}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void cpg_mssr_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
	if (ret)
		return ret;

	cpg_mssr_clk_domain = pd;

	return of_genpd_add_provider_simple(np, genpd);
}
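
/*
 * Sketch of a consumer of the clock domain registered above (node name,
 * address, and clock number are made up for illustration):
 *
 *	uart0: serial@e6e60000 {
 *		clocks = <&cpg CPG_MOD 204>;
 *		power-domains = <&cpg>;
 *	};
 *
 * On attach, cpg_mssr_attach_dev() walks the "clocks" property, picks the
 * first clock provided by this CPG/MSSR instance that qualifies as a PM
 * clock (any CPG_MOD clock, or a core clock listed in core_pm_clks[]), and
 * hands it to the pm_clk framework so it is gated across Runtime PM
 * transitions.
 */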

#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}
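
/*
 * Example of the reset specifier translated above (the number is chosen
 * only to show the arithmetic):
 *
 *	resets = <&cpg 522>;
 *
 * 522 means software reset register 5, bit 22 (the same "register * 100 +
 * bit" scheme as CPG_MOD clock numbers), which is translated to the flat
 * index 5 * 32 + 22 = 182 consumed by the reset_control_ops above.
 */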

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */

static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7742
	{
		.compatible = "renesas,r8a7742-cpg-mssr",
		.data = &r8a7742_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774E1
	{
		.compatible = "renesas,r8a774e1-cpg-mssr",
		.data = &r8a774e1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77961
	{
		.compatible = "renesas,r8a77961-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779A0
	{
		.compatible = "renesas,r8a779a0-cpg-mssr",
		.data = &r8a779a0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779F0
	{
		.compatible = "renesas,r8a779f0-cpg-mssr",
		.data = &r8a779f0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779G0
	{
		.compatible = "renesas,r8a779g0-cpg-mssr",
		.data = &r8a779g0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779H0
	{
		.compatible = "renesas,r8a779h0-cpg-mssr",
		.data = &r8a779h0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G077
	{
		.compatible = "renesas,r9a09g077-cpg-mssr",
		.data = &r9a09g077_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G087
	{
		.compatible = "renesas,r9a09g087-cpg-mssr",
		.data = &r9a09g077_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				readb(priv->pub.base0 + priv->control_regs[reg]) :
				readl(priv->pub.base0 + priv->control_regs[reg]);
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;
	u32 mask, oldval, newval;
	int error;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
			oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
		else
			oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
			/* dummy read to ensure write has completed */
			readb(priv->pub.base0 + priv->control_regs[reg]);
			barrier_data(priv->pub.base0 + priv->control_regs[reg]);
			continue;
		} else
			writel(newval, priv->pub.base0 + priv->control_regs[reg]);

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
						  oldval, !(oldval & mask), 0, 10);
		if (error)
			dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
				 oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
{
	kfree(priv->reserved_ids);
}

static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
					 const struct cpg_mssr_info *info)
{
	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
	struct device_node *node;
	uint32_t args[MAX_PHANDLE_ARGS];
	unsigned int *ids = NULL;
	unsigned int num = 0;

	/*
	 * Because clk_disable_unused() will disable all unused clocks, a
	 * device which is assigned to a non-Linux system would have its
	 * clocks disabled when Linux boots.
	 *
	 * To avoid such a situation, renesas-cpg-mssr assumes that a device
	 * with status = "reserved" is assigned to a non-Linux system, and
	 * adds the CLK_IGNORE_UNUSED flag to its CPG_MOD clocks.
	 * see also
	 *	cpg_mssr_register_mod_clk()
	 *
	 *	scif5: serial@e6f30000 {
	 *		...
	 * =>		clocks = <&cpg CPG_MOD 202>,
	 *			 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
	 *			 <&scif_clk>;
	 *		...
	 *		status = "reserved";
	 *	};
	 */
	for_each_reserved_child_of_node(soc, node) {
		struct of_phandle_iterator it;
		int rc;

		of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
			int idx;

			if (it.node != priv->np)
				continue;

			if (of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS) != 2)
				continue;

			if (args[0] != CPG_MOD)
				continue;

			ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
			if (!ids) {
				of_node_put(it.node);
				return -ENOMEM;
			}

			if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
				idx = MOD_CLK_PACK_10(args[1]);	/* for DEF_MOD_STB() */
			else
				idx = MOD_CLK_PACK(args[1]);	/* for DEF_MOD() */

			ids[num] = info->num_total_core_clks + idx;

			num++;
		}
	}

	priv->num_reserved_ids = num;
	priv->reserved_ids = ids;

	return 0;
}

static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pub.clks = priv->clks;
	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->pub.rmw_lock);

	priv->pub.base0 = of_iomap(np, 0);
	if (!priv->pub.base0) {
		error = -ENOMEM;
		goto out_err;
	}
	if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		priv->pub.base1 = of_iomap(np, 1);
		if (!priv->pub.base1) {
			error = -ENOMEM;
			goto out_err;
		}
	}

	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
	priv->reg_layout = info->reg_layout;
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
		priv->status_regs = mstpsr;
		priv->control_regs = smstpcr;
		priv->reset_regs = srcr;
		priv->reset_clear_regs = srstclr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		priv->control_regs = stbcr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		priv->control_regs = mstpcr_for_rzt2h;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
		priv->status_regs = mstpsr_for_gen4;
		priv->control_regs = mstpcr_for_gen4;
		priv->reset_regs = srcr_for_gen4;
		priv->reset_clear_regs = srstclr_for_gen4;
	} else {
		error = -EINVAL;
		goto out_err;
	}

	for (i = 0; i < nclks; i++)
		priv->pub.clks[i] = ERR_PTR(-ENOENT);

	error = cpg_mssr_reserved_init(priv, info);
	if (error)
		goto out_err;

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto reserve_err;

	cpg_mssr_priv = priv;

	return 0;

reserve_err:
	cpg_mssr_reserved_exit(priv);
out_err:
	if (priv->pub.base0)
		iounmap(priv->pub.base0);
	if (priv->pub.base1)
		iounmap(priv->pub.base1);
	kfree(priv);

	return error;
}

void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}

static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		goto reserve_exit;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		goto reserve_exit;

	/* Reset Controller not supported for Standby Control SoCs */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
	    priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
		goto reserve_exit;

	error = cpg_mssr_reset_controller_register(priv);

reserve_exit:
	cpg_mssr_reserved_exit(priv);

	return error;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");