xref: /linux/drivers/clk/clk-devres.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/gfp.h>

struct devm_clk_state {
	struct clk *clk;
	void (*exit)(struct clk *clk);
};

static void devm_clk_release(struct device *dev, void *res)
{
	struct devm_clk_state *state = res;

	if (state->exit)
		state->exit(state->clk);

	clk_put(state->clk);
}
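
/*
 * Common helper behind the devm_clk_get*() variants: allocate a devres
 * record, look the clock up with @get, optionally run @init on it, and
 * register @exit (followed by clk_put()) to run automatically when the
 * device is unbound.
 */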
static struct clk *__devm_clk_get(struct device *dev, const char *id,
				  struct clk *(*get)(struct device *dev, const char *id),
				  int (*init)(struct clk *clk),
				  void (*exit)(struct clk *clk))
{
	struct devm_clk_state *state;
	struct clk *clk;
	int ret;

	state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	clk = get(dev, id);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}

	if (init) {
		ret = init(clk);
		if (ret)
			goto err_clk_init;
	}

	state->clk = clk;
	state->exit = exit;

	devres_add(dev, state);

	return clk;

err_clk_init:
	clk_put(clk);
err_clk_get:
	devres_free(state);
	return ERR_PTR(ret);
}
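
/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Return: a struct clk corresponding to the clock producer, or a valid
 * IS_ERR() condition containing errno. The clock reference is automatically
 * dropped when the device is unbound.
 */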
struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
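
/**
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Return: a struct clk corresponding to the clock producer, or a valid
 * IS_ERR() condition containing errno. The clock is prepared when obtained
 * and automatically unprepared and put when the device is unbound.
 */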
struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
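
/**
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Return: a struct clk corresponding to the clock producer, or a valid
 * IS_ERR() condition containing errno. The clock is prepared and enabled
 * when obtained and automatically disabled, unprepared and put when the
 * device is unbound.
 */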
struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get,
			      clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
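
/*
 * A minimal usage sketch for the managed getters above, assuming a platform
 * driver probe (foo_probe() and the "bus" clock ID are illustrative only,
 * not part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_get_enabled(&pdev->dev, "bus");
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return 0;
 *	}
 *
 * No explicit clk_disable_unprepare() or clk_put() is needed on the remove
 * or error paths; devres undoes both when the device is unbound.
 */

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an
 *			   optional clock producer
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock
 * producer: in that case, instead of returning -ENOENT, the function
 * returns NULL.
 */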
struct clk *devm_clk_get_optional(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
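
/**
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Like devm_clk_get_optional(), but the clock is also prepared when obtained
 * and automatically unprepared and put when the device is unbound.
 */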
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional,
			      clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
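
/**
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *				   clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Like devm_clk_get_optional(), but the clock is also prepared and enabled
 * when obtained, and automatically disabled, unprepared and put when the
 * device is unbound.
 */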
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional,
			      clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
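
/**
 * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
 *					     clk_set_rate() +
 *					     clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 * @rate: new clock rate, in Hz
 *
 * Like devm_clk_get_optional_enabled(), except the requested rate is set
 * with clk_set_rate() before the clock is prepared and enabled.
 */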
static int devm_clk_match(struct device *dev, void *res, void *data);

struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
						    const char *id,
						    unsigned long rate)
{
	struct clk *clk;
	int ret;

	clk = __devm_clk_get(dev, id, clk_get_optional, NULL,
			     clk_disable_unprepare);
	if (IS_ERR(clk))
		return ERR_CAST(clk);

	ret = clk_set_rate(clk, rate);
	if (ret)
		goto out_put_clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto out_put_clk;

	return clk;

out_put_clk:
	/*
	 * The clock is not prepared/enabled on these error paths, so drop
	 * the devres record without running its clk_disable_unprepare()
	 * exit hook (devm_clk_put() would invoke it), then put the clock
	 * directly.
	 */
	devres_destroy(dev, devm_clk_release, devm_clk_match, clk);
	clk_put(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate);

struct clk_bulk_devres {
	struct clk_bulk_data *clks;
	int num_clks;
};

static void devm_clk_bulk_release(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_put(devres->num_clks, devres->clks);
}
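
/*
 * Common helper for the managed bulk getters: look the clocks up (optionally
 * tolerating missing ones) and register a devres record that puts them all
 * when the device is unbound.
 */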
static int __devm_clk_bulk_get(struct device *dev, int num_clks,
			       struct clk_bulk_data *clks, bool optional)
{
	struct clk_bulk_devres *devres;
	int ret;

	devres = devres_alloc(devm_clk_bulk_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	if (optional)
		ret = clk_bulk_get_optional(dev, num_clks, clks);
	else
		ret = clk_bulk_get(dev, num_clks, clks);
	if (!ret) {
		devres->clks = clks;
		devres->num_clks = num_clks;
		devres_add(dev, devres);
	} else {
		devres_free(devres);
	}

	return ret;
}
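
/**
 * devm_clk_bulk_get - managed get of multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return: 0 on success, an errno on failure. The clocks are automatically
 * put when the device is unbound.
 */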
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
		      struct clk_bulk_data *clks)
{
	return __devm_clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
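
/**
 * devm_clk_bulk_get_optional - managed get of multiple optional clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where a clock producer is
 * missing: such entries are left with a NULL clk pointer instead of failing
 * the whole request.
 */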
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
		      struct clk_bulk_data *clks)
{
	return __devm_clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);

static void devm_clk_bulk_release_all(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_put_all(devres->num_clks, devres->clks);
}
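
/**
 * devm_clk_bulk_get_all - managed get of all clk consumers of a device
 * @dev: device for clock "consumer"
 * @clks: pointer which is filled with the allocated clk_bulk_data table
 *
 * Return: the number of clocks obtained (possibly 0), or an errno on failure.
 * The table and the clock references are released automatically when the
 * device is unbound.
 */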
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks)
{
	struct clk_bulk_devres *devres;
	int ret;

	devres = devres_alloc(devm_clk_bulk_release_all,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	ret = clk_bulk_get_all(dev, &devres->clks);
	if (ret > 0) {
		*clks = devres->clks;
		devres->num_clks = ret;
		devres_add(dev, devres);
	} else {
		devres_free(devres);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);

static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
	clk_bulk_put_all(devres->num_clks, devres->clks);
}
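
/**
 * devm_clk_bulk_get_all_enabled - devm_clk_bulk_get_all() +
 *				   clk_bulk_prepare_enable()
 * @dev: device for clock "consumer"
 * @clks: pointer which is filled with the allocated clk_bulk_data table
 *
 * Return: the number of clocks obtained and enabled (possibly 0), or an
 * errno on failure. The clocks are disabled, unprepared and put automatically
 * when the device is unbound.
 */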
int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
					       struct clk_bulk_data **clks)
{
	struct clk_bulk_devres *devres;
	int ret;

	devres = devres_alloc(devm_clk_bulk_release_all_enable,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	ret = clk_bulk_get_all(dev, &devres->clks);
	if (ret > 0) {
		*clks = devres->clks;
		devres->num_clks = ret;
	} else {
		devres_free(devres);
		return ret;
	}

	ret = clk_bulk_prepare_enable(devres->num_clks, *clks);
	if (!ret) {
		devres_add(dev, devres);
	} else {
		clk_bulk_put_all(devres->num_clks, devres->clks);
		devres_free(devres);
		return ret;
	}

	return devres->num_clks;
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;

	if (!c || !*c) {
		WARN_ON(!c || !*c);
		return 0;
	}
	return *c == data;
}
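
/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get() or one of its variants
 *
 * Drops the managed reference immediately instead of waiting for the device
 * to be unbound. Any exit hook registered for it (e.g. disable/unprepare for
 * devm_clk_get_enabled()) runs as part of the release.
 */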
void devm_clk_put(struct device *dev, struct clk *clk)
{
	int ret;

	ret = devres_release(dev, devm_clk_release, devm_clk_match, clk);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_clk_put);
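
/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from a child device node
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer device node
 * @con_id: clock consumer ID
 *
 * The clock is looked up via of_clk_get_by_name() on @np and automatically
 * put when @dev is unbound.
 */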
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id)
{
	struct devm_clk_state *state;
	struct clk *clk;

	state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	clk = of_clk_get_by_name(np, con_id);
	if (!IS_ERR(clk)) {
		state->clk = clk;
		devres_add(dev, state);
	} else {
		devres_free(state);
	}

	return clk;
}
EXPORT_SYMBOL(devm_get_clk_from_child);