// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Gated clock implementation
 */
8
9 #include <linux/clk-provider.h>
10 #include <linux/device.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/io.h>
14 #include <linux/err.h>
15 #include <linux/string.h>
16
/**
 * DOC: basic gatable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
26
clk_gate_readl(struct clk_gate * gate)27 static inline u32 clk_gate_readl(struct clk_gate *gate)
28 {
29 if (gate->flags & CLK_GATE_BIG_ENDIAN)
30 return ioread32be(gate->reg);
31
32 return readl(gate->reg);
33 }
34
/* Write the gate register, honouring CLK_GATE_BIG_ENDIAN. */
static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
{
	if (!(gate->flags & CLK_GATE_BIG_ENDIAN)) {
		writel(val, gate->reg);
		return;
	}

	iowrite32be(val, gate->reg);
}
42
/*
 * It works on following logic:
 *
 * For enabling clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So, result is always: enable xor set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long flags;
	u32 reg;

	/* desired bit value = enable XOR set2dis (see truth table above) */
	set ^= enable;

	/*
	 * Serialize the read-modify-write against other users of the
	 * register. When no lock was supplied, __acquire()/__release()
	 * keep sparse's context tracking balanced without taking a lock.
	 */
	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		/*
		 * HIWORD-masked register: the upper half-word selects which
		 * low bit the write updates, so a single write suffices and
		 * no read-modify-write is needed.
		 */
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_gate_readl(gate);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_gate_writel(gate, reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}
90
/* clk_ops.enable callback: ungate the clock output. */
static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);
	return 0;
}
97
/* clk_ops.disable callback: gate the clock output. */
static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}
102
clk_gate_is_enabled(struct clk_hw * hw)103 int clk_gate_is_enabled(struct clk_hw *hw)
104 {
105 u32 reg;
106 struct clk_gate *gate = to_clk_gate(hw);
107
108 reg = clk_gate_readl(gate);
109
110 /* if a set bit disables this clk, flip it before masking */
111 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
112 reg ^= BIT(gate->bit_idx);
113
114 reg &= BIT(gate->bit_idx);
115
116 return reg ? 1 : 0;
117 }
118 EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
119
/*
 * Basic gate clock ops: gating only. Rate and parent are inherited from
 * the fixed parent — no set_rate/set_parent callbacks are provided.
 */
const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
126
/*
 * Register a gate clock controlled by @reg/@bit_idx and return its clk_hw.
 *
 * At most one of @parent_name, @parent_hw, @parent_data describes the
 * single fixed parent; with none given the clock has no parent. On failure
 * an ERR_PTR() is returned and the allocation is released.
 */
struct clk_hw *__clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_init_data init = {};
	struct clk_gate *gate;
	struct clk_hw *hw;
	int ret = -EINVAL;

	/* the HIWORD update scheme only covers bits 0..15 plus their mask */
	if ((clk_gate_flags & CLK_GATE_HIWORD_MASK) && bit_idx > 15) {
		pr_err("gate bit exceeds LOWORD field\n");
		return ERR_PTR(-EINVAL);
	}

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	init.num_parents = (parent_name || parent_hw || parent_data) ? 1 : 0;

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;
	hw = &gate->hw;

	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);

	if (ret) {
		kfree(gate);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
184
/* Legacy wrapper around clk_hw_register_gate() returning a struct clk. */
struct clk *clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw *hw = clk_hw_register_gate(dev, name, parent_name,
						 flags, reg, bit_idx,
						 clk_gate_flags, lock);

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);
199
/* Unregister a gate clock created by clk_register_gate() and free it. */
void clk_unregister_gate(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_gate *gate;

	if (!hw)
		return;

	gate = to_clk_gate(hw);

	/* unregister first; the gate memory backs the clk until then */
	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);
215
/* Unregister a gate clock created by clk_hw_register_gate() and free it. */
void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
226
/* devres destructor: tear down the gate clock tracked in @res. */
static void devm_clk_hw_release_gate(struct device *dev, void *res)
{
	struct clk_hw **hwp = res;

	clk_hw_unregister_gate(*hwp);
}
231
/*
 * Device-managed variant of __clk_hw_register_gate(): the clock is
 * unregistered automatically when @dev is unbound.
 */
struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
				    parent_data, flags, reg, bit_idx,
				    clk_gate_flags, lock);
	if (IS_ERR(hw)) {
		devres_free(ptr);
		return hw;
	}

	*ptr = hw;
	devres_add(dev, ptr);

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);
260