// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "ccu_common.h"
#include "ccu_gate.h"
#include "ccu_reset.h"

struct sunxi_ccu {
	const struct sunxi_ccu_desc	*desc;
	spinlock_t			lock;
	struct ccu_reset		reset;
};

void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
	void __iomem *addr;
	u32 reg;

	if (!lock)
		return;

	if (common->features & CCU_FEATURE_LOCK_REG)
		addr = common->base + common->lock_reg;
	else
		addr = common->base + common->reg;

	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}

/*
 * This clock notifier is called when the frequency of a PLL clock is
 * changed. In common PLL designs, changes to the dividers take effect
 * almost immediately, while changes to the multipliers (implemented
 * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
 *
 * Sometimes when the PLL clock rate is changed, the decrease in the
 * divider is too much for the decrease in the multiplier to catch up.
 * The PLL clock rate will spike, and in some cases, might lock up
 * completely.
 *
 * This notifier callback will gate and then ungate the clock,
 * effectively resetting it, so it proceeds to work. Care must be
 * taken to reparent consumers to other temporary clocks during the
 * rate change, and this notifier callback must be the first one
 * registered.
 */
static int ccu_pll_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
	int ret = 0;

	if (event != POST_RATE_CHANGE)
		goto out;

	ccu_gate_helper_disable(pll->common, pll->enable);

	ret = ccu_gate_helper_enable(pll->common, pll->enable);
	if (ret)
		goto out;

	ccu_helper_wait_for_lock(pll->common, pll->lock);

out:
	return notifier_from_errno(ret);
}

int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
{
	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;

	return clk_notifier_register(pll_nb->common->hw.clk,
				     &pll_nb->clk_nb);
}
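
/*
 * Usage sketch (illustrative only, for a hypothetical SoC CCU driver):
 * a driver that needs the gate/ungate workaround above typically
 * declares a ccu_pll_nb whose .enable and .lock fields mirror the gate
 * and lock bits of its CPU PLL, and registers it from its setup path
 * before any consumer can trigger a rate change. The pll_cpux_clk name
 * and the BIT(31)/BIT(28) values below are placeholder examples, not
 * symbols defined by this file.
 *
 *	static struct ccu_pll_nb pll_cpux_nb = {
 *		.common	= &pll_cpux_clk.common,
 *		.enable	= BIT(31),
 *		.lock	= BIT(28),
 *	};
 *
 *	ccu_pll_notifier_register(&pll_cpux_nb);
 */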

static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
			   struct device_node *node, void __iomem *reg,
			   const struct sunxi_ccu_desc *desc)
{
	struct ccu_reset *reset;
	int i, ret;

	ccu->desc = desc;

	spin_lock_init(&ccu->lock);

	/* Point every CCU clock at the register base and the shared lock. */
	for (i = 0; i < desc->num_ccu_clks; i++) {
		struct ccu_common *cclk = desc->ccu_clks[i];

		if (!cclk)
			continue;

		cclk->base = reg;
		cclk->lock = &ccu->lock;
	}

	/* Register the clk_hw clocks, with or without a backing device. */
	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];
		const char *name;

		if (!hw)
			continue;

		name = hw->init->name;
		if (dev)
			ret = clk_hw_register(dev, hw);
		else
			ret = of_clk_hw_register(node, hw);
		if (ret) {
			pr_err("Couldn't register clock %d - %s\n", i, name);
			goto err_clk_unreg;
		}
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     desc->hw_clks);
	if (ret)
		goto err_clk_unreg;

	/* Expose the CCU's reset lines through a reset controller. */
	reset = &ccu->reset;
	reset->rcdev.of_node = node;
	reset->rcdev.ops = &ccu_reset_ops;
	reset->rcdev.owner = dev ? dev->driver->owner : THIS_MODULE;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->base = reg;
	reset->lock = &ccu->lock;
	reset->reset_map = desc->resets;

	ret = reset_controller_register(&reset->rcdev);
	if (ret)
		goto err_del_provider;

	return 0;

err_del_provider:
	of_clk_del_provider(node);
err_clk_unreg:
	/* Unregister the clocks that were registered before the failure. */
	while (--i >= 0) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;
		clk_hw_unregister(hw);
	}
	return ret;
}

static void devm_sunxi_ccu_release(struct device *dev, void *res)
{
	struct sunxi_ccu *ccu = res;
	const struct sunxi_ccu_desc *desc = ccu->desc;
	int i;

	reset_controller_unregister(&ccu->reset.rcdev);
	of_clk_del_provider(dev->of_node);

	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;
		clk_hw_unregister(hw);
	}
}

int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
			 const struct sunxi_ccu_desc *desc)
{
	struct sunxi_ccu *ccu;
	int ret;

	ccu = devres_alloc(devm_sunxi_ccu_release, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	ret = sunxi_ccu_probe(ccu, dev, dev->of_node, reg, desc);
	if (ret) {
		devres_free(ccu);
		return ret;
	}

	devres_add(dev, ccu);

	return 0;
}

void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
			const struct sunxi_ccu_desc *desc)
{
	struct sunxi_ccu *ccu;
	int ret;

	ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return;

	ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc);
	if (ret) {
		pr_err("%pOF: probing clocks failed: %d\n", node, ret);
		kfree(ccu);
	}
}
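
/*
 * Usage sketch (illustrative only): a platform driver built on these
 * helpers maps its MMIO region and hands the register base plus its
 * static clock/reset descriptor to devm_sunxi_ccu_probe(); providers
 * that must come up before the device model (e.g. from a CLK_OF_DECLARE
 * init callback) call of_sunxi_ccu_probe() instead. The soc_ccu_desc
 * descriptor and the probe function name below are hypothetical.
 *
 *	static int soc_ccu_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *reg;
 *
 *		reg = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(reg))
 *			return PTR_ERR(reg);
 *
 *		return devm_sunxi_ccu_probe(&pdev->dev, reg, &soc_ccu_desc);
 *	}
 */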