19c92ab61SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2872f91b5SGeorgi Djakov /*
3872f91b5SGeorgi Djakov * Copyright (c) 2016, Linaro Limited
4872f91b5SGeorgi Djakov * Copyright (c) 2014, The Linux Foundation. All rights reserved.
5872f91b5SGeorgi Djakov */
6872f91b5SGeorgi Djakov
7872f91b5SGeorgi Djakov #include <linux/clk-provider.h>
8872f91b5SGeorgi Djakov #include <linux/err.h>
9872f91b5SGeorgi Djakov #include <linux/export.h>
10872f91b5SGeorgi Djakov #include <linux/init.h>
11872f91b5SGeorgi Djakov #include <linux/kernel.h>
12872f91b5SGeorgi Djakov #include <linux/module.h>
13872f91b5SGeorgi Djakov #include <linux/mutex.h>
14872f91b5SGeorgi Djakov #include <linux/mfd/qcom_rpm.h>
15872f91b5SGeorgi Djakov #include <linux/of.h>
16872f91b5SGeorgi Djakov #include <linux/platform_device.h>
17872f91b5SGeorgi Djakov
18872f91b5SGeorgi Djakov #include <dt-bindings/mfd/qcom-rpm.h>
19872f91b5SGeorgi Djakov #include <dt-bindings/clock/qcom,rpmcc.h>
20872f91b5SGeorgi Djakov
/* RPM resource type for misc clocks: the bytes spell "clk0" little-endian */
#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
/* XO buffer mode field value that forces the buffer on (shifted by xo_offset) */
#define QCOM_RPM_XO_MODE_ON 0x2
24872f91b5SGeorgi Djakov
/* Board PXO crystal parent; .name is the legacy global-lookup fallback */
static const struct clk_parent_data gcc_pxo[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
};

/* Board CXO crystal parent, used by the XO buffer clocks */
static const struct clk_parent_data gcc_cxo[] = {
	{ .fw_name = "cxo", .name = "cxo_board" },
};
32129d9cd9SChristian Marangi
/*
 * DEFINE_CLK_RPM - declare a peered pair of RPM-voted clocks
 *
 * Expands to two struct clk_rpm instances for RPM resource @r_id:
 * "<name>_clk" (votes in both the active and sleep RPM sets) and
 * "<name>_a_clk" (active_only: its sleep-set vote is forced to 0).
 * Each points at the other via .peer so votes can be aggregated, and
 * both start with an INT_MAX rate request.
 */
#define DEFINE_CLK_RPM(_name, r_id)					      \
	static struct clk_rpm clk_rpm_##_name##_a_clk;			      \
	static struct clk_rpm clk_rpm_##_name##_clk = {			      \
		.rpm_clk_id = (r_id),					      \
		.peer = &clk_rpm_##_name##_a_clk,			      \
		.rate = INT_MAX,					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_ops,				      \
			.name = #_name "_clk",				      \
			.parent_data = gcc_pxo,				      \
			.num_parents = ARRAY_SIZE(gcc_pxo),		      \
		},							      \
	};								      \
	static struct clk_rpm clk_rpm_##_name##_a_clk = {		      \
		.rpm_clk_id = (r_id),					      \
		.peer = &clk_rpm_##_name##_clk,				      \
		.active_only = true,					      \
		.rate = INT_MAX,					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_ops,				      \
			.name = #_name "_a_clk",			      \
			.parent_data = gcc_pxo,				      \
			.num_parents = ARRAY_SIZE(gcc_pxo),		      \
		},							      \
	}
58872f91b5SGeorgi Djakov
/*
 * DEFINE_CLK_RPM_XO_BUFFER - declare one XO buffer clock
 *
 * All XO buffers share the single QCOM_RPM_CXO_BUFFERS resource word;
 * @offset is the bit position of this buffer's mode field within it.
 */
#define DEFINE_CLK_RPM_XO_BUFFER(_name, offset)				      \
	static struct clk_rpm clk_rpm_##_name##_clk = {			      \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS,			      \
		.xo_offset = (offset),					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_xo_ops,				      \
			.name = #_name "_clk",				      \
			.parent_data = gcc_cxo,				      \
			.num_parents = ARRAY_SIZE(gcc_cxo),		      \
		},							      \
	}
708bcde658SSrinivas Kandagatla
/*
 * DEFINE_CLK_RPM_FIXED - declare a fixed-rate, gate-only RPM clock
 *
 * The rate @r is reported as-is; the RPM is only asked to switch the
 * clock on or off (see clk_rpm_fixed_ops).
 */
#define DEFINE_CLK_RPM_FIXED(_name, r_id, r)				      \
	static struct clk_rpm clk_rpm_##_name##_clk = {			      \
		.rpm_clk_id = (r_id),					      \
		.rate = (r),						      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_fixed_ops,			      \
			.name = #_name "_clk",				      \
			.parent_data = gcc_pxo,				      \
			.num_parents = ARRAY_SIZE(gcc_pxo),		      \
		},							      \
	}
82d4a69583SLinus Walleij
#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;

/*
 * struct clk_rpm - one clock resource voted on via the RPM
 * @rpm_clk_id:	RPM resource id passed to qcom_rpm_write()
 * @xo_offset:	bit shift of this XO buffer's mode field in the shared
 *		QCOM_RPM_CXO_BUFFERS word (XO buffer clocks only)
 * @active_only: true for the "_a" half of a pair; such clocks vote 0
 *		in the RPM sleep set (see to_active_sleep())
 * @rate:	last rate requested, in Hz (converted to kHz on write)
 * @enabled:	whether this clock currently holds an enable vote
 * @branch:	if true, rate votes are clamped to 0/1 (on/off)
 * @peer:	the other member of the active/sleep pair
 * @hw:	common clk framework handle
 * @rpm:	RPM handle used for all writes
 * @rpm_cc:	back-pointer to controller state (shared XO buffer word)
 */
struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

/* Per-instance controller state shared by all of its clocks */
struct rpm_cc {
	struct clk_rpm **clks;		/* indexed by DT clock-specifier cell */
	size_t num_clks;
	u32 xo_buffer_value;		/* cached QCOM_RPM_CXO_BUFFERS word */
	struct mutex xo_lock;		/* serializes xo_buffer_value updates */
};

/* Static per-SoC description: which clocks to register */
struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};
111872f91b5SGeorgi Djakov
112872f91b5SGeorgi Djakov static DEFINE_MUTEX(rpm_clk_lock);
113872f91b5SGeorgi Djakov
clk_rpm_handoff(struct clk_rpm * r)114872f91b5SGeorgi Djakov static int clk_rpm_handoff(struct clk_rpm *r)
115872f91b5SGeorgi Djakov {
116872f91b5SGeorgi Djakov int ret;
117872f91b5SGeorgi Djakov u32 value = INT_MAX;
118872f91b5SGeorgi Djakov
119d4a69583SLinus Walleij /*
120d4a69583SLinus Walleij * The vendor tree simply reads the status for this
121d4a69583SLinus Walleij * RPM clock.
122d4a69583SLinus Walleij */
1238bcde658SSrinivas Kandagatla if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
1248bcde658SSrinivas Kandagatla r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
125d4a69583SLinus Walleij return 0;
126d4a69583SLinus Walleij
127872f91b5SGeorgi Djakov ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
128872f91b5SGeorgi Djakov r->rpm_clk_id, &value, 1);
129872f91b5SGeorgi Djakov if (ret)
130872f91b5SGeorgi Djakov return ret;
131872f91b5SGeorgi Djakov ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
132872f91b5SGeorgi Djakov r->rpm_clk_id, &value, 1);
133872f91b5SGeorgi Djakov if (ret)
134872f91b5SGeorgi Djakov return ret;
135872f91b5SGeorgi Djakov
136872f91b5SGeorgi Djakov return 0;
137872f91b5SGeorgi Djakov }
138872f91b5SGeorgi Djakov
clk_rpm_set_rate_active(struct clk_rpm * r,unsigned long rate)139872f91b5SGeorgi Djakov static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
140872f91b5SGeorgi Djakov {
141872f91b5SGeorgi Djakov u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
142872f91b5SGeorgi Djakov
143872f91b5SGeorgi Djakov return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
144872f91b5SGeorgi Djakov r->rpm_clk_id, &value, 1);
145872f91b5SGeorgi Djakov }
146872f91b5SGeorgi Djakov
clk_rpm_set_rate_sleep(struct clk_rpm * r,unsigned long rate)147872f91b5SGeorgi Djakov static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
148872f91b5SGeorgi Djakov {
149872f91b5SGeorgi Djakov u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
150872f91b5SGeorgi Djakov
151872f91b5SGeorgi Djakov return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
152872f91b5SGeorgi Djakov r->rpm_clk_id, &value, 1);
153872f91b5SGeorgi Djakov }
154872f91b5SGeorgi Djakov
to_active_sleep(struct clk_rpm * r,unsigned long rate,unsigned long * active,unsigned long * sleep)155872f91b5SGeorgi Djakov static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
156872f91b5SGeorgi Djakov unsigned long *active, unsigned long *sleep)
157872f91b5SGeorgi Djakov {
158872f91b5SGeorgi Djakov *active = rate;
159872f91b5SGeorgi Djakov
160872f91b5SGeorgi Djakov /*
161872f91b5SGeorgi Djakov * Active-only clocks don't care what the rate is during sleep. So,
162872f91b5SGeorgi Djakov * they vote for zero.
163872f91b5SGeorgi Djakov */
164872f91b5SGeorgi Djakov if (r->active_only)
165872f91b5SGeorgi Djakov *sleep = 0;
166872f91b5SGeorgi Djakov else
167872f91b5SGeorgi Djakov *sleep = *active;
168872f91b5SGeorgi Djakov }
169872f91b5SGeorgi Djakov
/*
 * clk_rpm_prepare() - vote the clock on with the RPM
 *
 * Each clock is half of an active/sleep pair sharing one RPM resource,
 * so before writing we aggregate (max) our vote with the peer's for
 * both the active and the sleep sets.
 */
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	/* Branch clocks are on/off only: clamp the vote to 0 or 1 */
	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/*
		 * Undo the active set vote and restore it.
		 * NOTE(review): ret is overwritten here, so if the restore
		 * write succeeds the sleep-set failure is masked and the
		 * clock is still marked enabled below — confirm intended.
		 */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}
218872f91b5SGeorgi Djakov
/*
 * clk_rpm_unprepare() - drop this clock's vote with the RPM
 *
 * We cannot simply vote zero: the peer clock may still be enabled, so
 * the votes written are the peer's alone (zero if it is disabled too).
 */
static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	/* No vote was ever sent if the rate was never set */
	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}
252872f91b5SGeorgi Djakov
/*
 * clk_rpm_xo_prepare() - switch one XO buffer on
 *
 * All XO buffers share the single QCOM_RPM_CXO_BUFFERS word, so we OR
 * this buffer's mode bits into the cached word under xo_lock and write
 * the merged value, caching it on success to preserve the other
 * buffers' state on subsequent updates.
 */
static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}
2738bcde658SSrinivas Kandagatla
/*
 * clk_rpm_xo_unprepare() - switch one XO buffer off
 *
 * Clears only this buffer's mode bits from the cached shared word and
 * writes the result, leaving the remaining buffers untouched.
 */
static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}
2928bcde658SSrinivas Kandagatla
clk_rpm_fixed_prepare(struct clk_hw * hw)293d4a69583SLinus Walleij static int clk_rpm_fixed_prepare(struct clk_hw *hw)
294d4a69583SLinus Walleij {
295d4a69583SLinus Walleij struct clk_rpm *r = to_clk_rpm(hw);
296d4a69583SLinus Walleij u32 value = 1;
297d4a69583SLinus Walleij int ret;
298d4a69583SLinus Walleij
299d4a69583SLinus Walleij ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
300d4a69583SLinus Walleij r->rpm_clk_id, &value, 1);
301d4a69583SLinus Walleij if (!ret)
302d4a69583SLinus Walleij r->enabled = true;
303d4a69583SLinus Walleij
304d4a69583SLinus Walleij return ret;
305d4a69583SLinus Walleij }
306d4a69583SLinus Walleij
clk_rpm_fixed_unprepare(struct clk_hw * hw)307d4a69583SLinus Walleij static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
308d4a69583SLinus Walleij {
309d4a69583SLinus Walleij struct clk_rpm *r = to_clk_rpm(hw);
310d4a69583SLinus Walleij u32 value = 0;
311d4a69583SLinus Walleij int ret;
312d4a69583SLinus Walleij
313d4a69583SLinus Walleij ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
314d4a69583SLinus Walleij r->rpm_clk_id, &value, 1);
315d4a69583SLinus Walleij if (!ret)
316d4a69583SLinus Walleij r->enabled = false;
317d4a69583SLinus Walleij }
318d4a69583SLinus Walleij
/*
 * clk_rpm_set_rate() - change the voted rate of an enabled clock
 *
 * Disabled clocks only record the rate in r->rate at prepare time; here
 * we send new votes only if the clock is enabled, aggregating with the
 * peer exactly as clk_rpm_prepare() does.
 */
static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Not enabled: nothing is voted with the RPM, so nothing to update */
	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	/* Record the new rate only once both votes went through */
	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}
358872f91b5SGeorgi Djakov
static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * Rate selection happens inside the RPM and there is no way to
	 * query what it will choose, so report the requested rate back
	 * unchanged.
	 */
	return rate;
}
369872f91b5SGeorgi Djakov
clk_rpm_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)370872f91b5SGeorgi Djakov static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
371872f91b5SGeorgi Djakov unsigned long parent_rate)
372872f91b5SGeorgi Djakov {
373872f91b5SGeorgi Djakov struct clk_rpm *r = to_clk_rpm(hw);
374872f91b5SGeorgi Djakov
375872f91b5SGeorgi Djakov /*
376872f91b5SGeorgi Djakov * RPM handles rate rounding and we don't have a way to
377872f91b5SGeorgi Djakov * know what the rate will be, so just return whatever
378872f91b5SGeorgi Djakov * rate was set.
379872f91b5SGeorgi Djakov */
380872f91b5SGeorgi Djakov return r->rate;
381872f91b5SGeorgi Djakov }
382872f91b5SGeorgi Djakov
/* XO buffer clocks: gate-only, no rate operations */
static const struct clk_ops clk_rpm_xo_ops = {
	.prepare = clk_rpm_xo_prepare,
	.unprepare = clk_rpm_xo_unprepare,
};

/* Fixed-rate clocks (e.g. PLL4): gated via a 0/1 RPM write */
static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare = clk_rpm_fixed_prepare,
	.unprepare = clk_rpm_fixed_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

/* Regular rate-voting clocks, aggregated with their active/sleep peer */
static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};
402872f91b5SGeorgi Djakov
/* Fabric/bus clock pairs shared by several SoCs */
DEFINE_CLK_RPM(afab, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(sfab, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(mmfab, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(daytona, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(sfpb, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(cfpb, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(mmfpb, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(smi, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(ebi1, QCOM_RPM_EBI1_CLK);

/* Debug and (ipq806x) NSS fabric clock pairs */
DEFINE_CLK_RPM(qdss, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM(nss_fabric_0, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(nss_fabric_1, QCOM_RPM_NSS_FABRIC_1_CLK);

DEFINE_CLK_RPM_FIXED(pll4, QCOM_RPM_PLL_4, 540672000);

/* XO buffers; the number is each buffer's bit offset in the shared word */
DEFINE_CLK_RPM_XO_BUFFER(xo_d0, 0);
DEFINE_CLK_RPM_XO_BUFFER(xo_d1, 8);
DEFINE_CLK_RPM_XO_BUFFER(xo_a0, 16);
DEFINE_CLK_RPM_XO_BUFFER(xo_a1, 24);
DEFINE_CLK_RPM_XO_BUFFER(xo_a2, 28);
424d4a69583SLinus Walleij
/* msm8660/apq8060 clocks, indexed by the DT binding's RPM_* constants */
static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SMI_CLK] = &clk_rpm_smi_clk,
	[RPM_SMI_A_CLK] = &clk_rpm_smi_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_PLL4_CLK] = &clk_rpm_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};
451d4a69583SLinus Walleij
/* apq8064 clocks, indexed by the DT binding's RPM_* constants */
static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_QDSS_CLK] = &clk_rpm_qdss_clk,
	[RPM_QDSS_A_CLK] = &clk_rpm_qdss_a_clk,
	[RPM_XO_D0] = &clk_rpm_xo_d0_clk,
	[RPM_XO_D1] = &clk_rpm_xo_d1_clk,
	[RPM_XO_A0] = &clk_rpm_xo_a0_clk,
	[RPM_XO_A1] = &clk_rpm_xo_a1_clk,
	[RPM_XO_A2] = &clk_rpm_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};
482872f91b5SGeorgi Djakov
/* ipq806x clocks, indexed by the DT binding's RPM_* constants */
static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &clk_rpm_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &clk_rpm_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &clk_rpm_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &clk_rpm_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};
506eec15273SAnsuel Smith
/* apq8060 reuses the msm8660 clock list */
static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
515872f91b5SGeorgi Djakov
qcom_rpm_clk_hw_get(struct of_phandle_args * clkspec,void * data)516c260524aSGeorgi Djakov static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
517c260524aSGeorgi Djakov void *data)
518c260524aSGeorgi Djakov {
519c260524aSGeorgi Djakov struct rpm_cc *rcc = data;
520c260524aSGeorgi Djakov unsigned int idx = clkspec->args[0];
521c260524aSGeorgi Djakov
522c260524aSGeorgi Djakov if (idx >= rcc->num_clks) {
523c260524aSGeorgi Djakov pr_err("%s: invalid index %u\n", __func__, idx);
524c260524aSGeorgi Djakov return ERR_PTR(-EINVAL);
525c260524aSGeorgi Djakov }
526c260524aSGeorgi Djakov
527c260524aSGeorgi Djakov return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
528c260524aSGeorgi Djakov }
529c260524aSGeorgi Djakov
/*
 * rpm_clk_probe() - register all RPM clocks for the matched SoC
 *
 * Retrieves the RPM handle from the parent MFD, hands off each clock
 * (initial INT_MAX vote), registers every clk_hw with devm, and installs
 * an of_clk provider that resolves DT indices through rcc->clks.
 */
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	/* The RPM handle is the drvdata of our parent MFD device */
	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	/* First pass: wire up handles and hand off boot-time votes */
	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	/* Second pass: register the clocks with the clk framework */
	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_rpm_clk_hw_get,
					  rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}
591872f91b5SGeorgi Djakov
/* Platform driver bound to the rpmcc child nodes of the qcom_rpm MFD */
static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
};
599872f91b5SGeorgi Djakov
/* Registered at core_initcall so clocks exist before consumers probe */
static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);
611872f91b5SGeorgi Djakov
612872f91b5SGeorgi Djakov MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
613872f91b5SGeorgi Djakov MODULE_LICENSE("GPL v2");
614872f91b5SGeorgi Djakov MODULE_ALIAS("platform:qcom-clk-rpm");
615