1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2022, 2023 Linaro Ltd.
4 */
5 #include <linux/bitfield.h>
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/interconnect-clk.h>
9 #include <linux/interconnect-provider.h>
10 #include <linux/of.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/regmap.h>
14
15 #include <dt-bindings/interconnect/qcom,msm8996-cbf.h>
16
17 #include "clk-alpha-pll.h"
18 #include "clk-regmap.h"
19
/* Need to match the order of clocks in DT binding */
enum {
	DT_XO,		/* board XO, entry 0 of the "clocks" property */
	DT_APCS_AUX,	/* APCS aux clock, entry 1 */
};
25
/* Parent indices of the CBF mux; order matches cbf_mux_parent_data[] below */
enum {
	CBF_XO_INDEX,		/* XO */
	CBF_PLL_INDEX,		/* CBF PLL output */
	CBF_DIV_INDEX,		/* CBF PLL through the fixed post divider */
	CBF_APCS_AUX_INDEX,	/* APCS aux clock */
};
32
33 #define DIV_THRESHOLD 600000000
34
35 #define CBF_MUX_OFFSET 0x18
36 #define CBF_MUX_PARENT_MASK GENMASK(1, 0)
37 #define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
38 #define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
39 FIELD_PREP(CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
40 #define CBF_MUX_AUTO_CLK_SEL_BIT BIT(6)
41
42 #define CBF_PLL_OFFSET 0xf000
43
/*
 * Initial configuration programmed into the CBF PLL at probe time.
 * Deliberately not const: probe() patches post_div_val (together with
 * cbf_pll_postdiv.div) for the msm8996pro variant.
 */
static struct alpha_pll_config cbfpll_config = {
	.l = 72,
	.config_ctl_val = 0x200d4828,
	.config_ctl_hi_val = 0x006,
	.test_ctl_val = 0x1c000000,
	.test_ctl_hi_val = 0x00004000,
	.pre_div_mask = BIT(12),
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,	/* /2 by default, /4 on msm8996pro */
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};
56
/* The CBF PLL (Huayra APSS register layout), parented to the board XO */
static struct clk_alpha_pll cbf_pll = {
	.offset = CBF_PLL_OFFSET,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS],
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "cbf_pll",
		.parent_data = (const struct clk_parent_data[]) {
			{ .index = DT_XO, },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};
70
/*
 * Fixed-factor divider on the PLL output, used as the low-rate mux input.
 * div is 2 by default; probe() changes it to 4 on msm8996pro.
 */
static struct clk_fixed_factor cbf_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "cbf_pll_postdiv",
		.parent_hws = (const struct clk_hw*[]){
			&cbf_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};
84
/* Mux inputs; order must match the CBF_*_INDEX enum above */
static const struct clk_parent_data cbf_mux_parent_data[] = {
	{ .index = DT_XO },
	{ .hw = &cbf_pll.clkr.hw },
	{ .hw = &cbf_pll_postdiv.hw },
	{ .index = DT_APCS_AUX },
};
91
/**
 * struct clk_cbf_8996_mux - CBF parent mux
 * @reg: register offset of the mux control (CBF_MUX_OFFSET)
 * @nb: clk notifier that switches parents around rate changes
 * @clkr: regmap-backed clk_hw
 */
struct clk_cbf_8996_mux {
	u32 reg;
	struct notifier_block nb;
	struct clk_regmap clkr;
};
97
/* Map the embedded clk_regmap back to its containing clk_cbf_8996_mux */
static struct clk_cbf_8996_mux *to_clk_cbf_8996_mux(struct clk_regmap *clkr)
{
	return container_of(clkr, struct clk_cbf_8996_mux, clkr);
}
102
103 static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
104 void *data);
105
clk_cbf_8996_mux_get_parent(struct clk_hw * hw)106 static u8 clk_cbf_8996_mux_get_parent(struct clk_hw *hw)
107 {
108 struct clk_regmap *clkr = to_clk_regmap(hw);
109 struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
110 u32 val;
111
112 regmap_read(clkr->regmap, mux->reg, &val);
113
114 return FIELD_GET(CBF_MUX_PARENT_MASK, val);
115 }
116
/* Program the mux select field to route parent @index to the CBF */
static int clk_cbf_8996_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);

	return regmap_update_bits(clkr->regmap, mux->reg, CBF_MUX_PARENT_MASK,
				  FIELD_PREP(CBF_MUX_PARENT_MASK, index));
}
127
clk_cbf_8996_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)128 static int clk_cbf_8996_mux_determine_rate(struct clk_hw *hw,
129 struct clk_rate_request *req)
130 {
131 struct clk_hw *parent;
132
133 if (req->rate < (DIV_THRESHOLD / cbf_pll_postdiv.div))
134 return -EINVAL;
135
136 if (req->rate < DIV_THRESHOLD)
137 parent = clk_hw_get_parent_by_index(hw, CBF_DIV_INDEX);
138 else
139 parent = clk_hw_get_parent_by_index(hw, CBF_PLL_INDEX);
140
141 if (!parent)
142 return -EINVAL;
143
144 req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
145 req->best_parent_hw = parent;
146
147 return 0;
148 }
149
/* clk_ops for the CBF mux; determine_rate() selects the parent per rate */
static const struct clk_ops clk_cbf_8996_mux_ops = {
	.set_parent = clk_cbf_8996_mux_set_parent,
	.get_parent = clk_cbf_8996_mux_get_parent,
	.determine_rate = clk_cbf_8996_mux_determine_rate,
};
155
/* The CBF mux instance; its notifier guards PLL rate transitions */
static struct clk_cbf_8996_mux cbf_mux = {
	.reg = CBF_MUX_OFFSET,
	.nb.notifier_call = cbf_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "cbf_mux",
		.parent_data = cbf_mux_parent_data,
		.num_parents = ARRAY_SIZE(cbf_mux_parent_data),
		.ops = &clk_cbf_8996_mux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};
168
cbf_clk_notifier_cb(struct notifier_block * nb,unsigned long event,void * data)169 static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
170 void *data)
171 {
172 struct clk_notifier_data *cnd = data;
173
174 switch (event) {
175 case PRE_RATE_CHANGE:
176 /*
177 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
178 * to bottom, so it will change the rate of the PLL before
179 * chaging the parent of PMUX. This can result in pmux getting
180 * clocked twice the expected rate.
181 *
182 * Manually switch to PLL/2 here.
183 */
184 if (cnd->old_rate > DIV_THRESHOLD &&
185 cnd->new_rate < DIV_THRESHOLD)
186 clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_DIV_INDEX);
187 break;
188 case ABORT_RATE_CHANGE:
189 /* Revert manual change */
190 if (cnd->new_rate < DIV_THRESHOLD &&
191 cnd->old_rate > DIV_THRESHOLD)
192 clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_PLL_INDEX);
193 break;
194 default:
195 break;
196 }
197
198 return notifier_from_errno(0);
199 };
200
/* Non-regmap clk_hw clocks registered in probe() */
static struct clk_hw *cbf_msm8996_hw_clks[] = {
	&cbf_pll_postdiv.hw,
};
204
/* Regmap-backed clocks registered in probe() */
static struct clk_regmap *cbf_msm8996_clks[] = {
	&cbf_pll.clkr,
	&cbf_mux.clkr,
};
209
/* MMIO regmap covering the CBF block, including the PLL at 0xf000 */
static const struct regmap_config cbf_msm8996_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x10000,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
217
218 #ifdef CONFIG_INTERCONNECT
219
220 /* Random ID that doesn't clash with main qnoc and OSM */
221 #define CBF_MASTER_NODE 2000
222
qcom_msm8996_cbf_icc_register(struct platform_device * pdev,struct clk_hw * cbf_hw)223 static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
224 {
225 struct device *dev = &pdev->dev;
226 struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
227 const struct icc_clk_data data[] = {
228 {
229 .clk = clk,
230 .name = "cbf",
231 .master_id = MASTER_CBF_M4M,
232 .slave_id = SLAVE_CBF_M4M,
233 },
234 };
235 struct icc_provider *provider;
236
237 provider = icc_clk_register(dev, CBF_MASTER_NODE, ARRAY_SIZE(data), data);
238 if (IS_ERR(provider))
239 return PTR_ERR(provider);
240
241 platform_set_drvdata(pdev, provider);
242
243 return 0;
244 }
245
/* Unregister the interconnect provider created at probe time */
static void qcom_msm8996_cbf_icc_remove(struct platform_device *pdev)
{
	icc_clk_unregister(platform_get_drvdata(pdev));
}
252 #define qcom_msm8996_cbf_icc_sync_state icc_sync_state
253 #else
/* !CONFIG_INTERCONNECT stub: warn that the CBF rate cannot be scaled */
static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
{
	dev_warn(&pdev->dev, "CONFIG_INTERCONNECT is disabled, CBF clock is fixed\n");

	return 0;
}
260 #define qcom_msm8996_cbf_icc_remove(pdev) { }
261 #define qcom_msm8996_cbf_icc_sync_state NULL
262 #endif
263
qcom_msm8996_cbf_probe(struct platform_device * pdev)264 static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
265 {
266 void __iomem *base;
267 struct regmap *regmap;
268 struct device *dev = &pdev->dev;
269 int i, ret;
270
271 base = devm_platform_ioremap_resource(pdev, 0);
272 if (IS_ERR(base))
273 return PTR_ERR(base);
274
275 regmap = devm_regmap_init_mmio(dev, base, &cbf_msm8996_regmap_config);
276 if (IS_ERR(regmap))
277 return PTR_ERR(regmap);
278
279 /* Select GPLL0 for 300MHz for the CBF clock */
280 regmap_write(regmap, CBF_MUX_OFFSET, 0x3);
281
282 /* Ensure write goes through before PLLs are reconfigured */
283 udelay(5);
284
285 /* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
286 regmap_update_bits(regmap, CBF_MUX_OFFSET,
287 CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
288 CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
289
290 clk_alpha_pll_configure(&cbf_pll, regmap, &cbfpll_config);
291
292 /* Wait for PLL(s) to lock */
293 udelay(50);
294
295 /* Enable auto clock selection for CBF */
296 regmap_update_bits(regmap, CBF_MUX_OFFSET,
297 CBF_MUX_AUTO_CLK_SEL_BIT,
298 CBF_MUX_AUTO_CLK_SEL_BIT);
299
300 /* Ensure write goes through before muxes are switched */
301 udelay(5);
302
303 /* Switch CBF to use the primary PLL */
304 regmap_update_bits(regmap, CBF_MUX_OFFSET, CBF_MUX_PARENT_MASK, 0x1);
305
306 if (of_device_is_compatible(dev->of_node, "qcom,msm8996pro-cbf")) {
307 cbfpll_config.post_div_val = 0x3 << 8;
308 cbf_pll_postdiv.div = 4;
309 }
310
311 for (i = 0; i < ARRAY_SIZE(cbf_msm8996_hw_clks); i++) {
312 ret = devm_clk_hw_register(dev, cbf_msm8996_hw_clks[i]);
313 if (ret)
314 return ret;
315 }
316
317 for (i = 0; i < ARRAY_SIZE(cbf_msm8996_clks); i++) {
318 ret = devm_clk_register_regmap(dev, cbf_msm8996_clks[i]);
319 if (ret)
320 return ret;
321 }
322
323 ret = devm_clk_notifier_register(dev, cbf_mux.clkr.hw.clk, &cbf_mux.nb);
324 if (ret)
325 return ret;
326
327 ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
328 if (ret)
329 return ret;
330
331 return qcom_msm8996_cbf_icc_register(pdev, &cbf_mux.clkr.hw);
332 }
333
/* Remove: only the interconnect provider needs explicit teardown */
static void qcom_msm8996_cbf_remove(struct platform_device *pdev)
{
	qcom_msm8996_cbf_icc_remove(pdev);
}
338
/* DT compatibles; the pro variant gets a different PLL post divider */
static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
	{ .compatible = "qcom,msm8996-cbf" },
	{ .compatible = "qcom,msm8996pro-cbf" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
345
static struct platform_driver qcom_msm8996_cbf_driver = {
	.probe = qcom_msm8996_cbf_probe,
	.remove = qcom_msm8996_cbf_remove,
	.driver = {
		.name = "qcom-msm8996-cbf",
		.of_match_table = qcom_msm8996_cbf_match_table,
		/* interconnect sync_state (NULL without CONFIG_INTERCONNECT) */
		.sync_state = qcom_msm8996_cbf_icc_sync_state,
	},
};
355
/* Register early enough to fix the clock to be used for other cores */
static int __init qcom_msm8996_cbf_init(void)
{
	return platform_driver_register(&qcom_msm8996_cbf_driver);
}
postcore_initcall(qcom_msm8996_cbf_init);
362
/* Module unload: unregister the platform driver */
static void __exit qcom_msm8996_cbf_exit(void)
{
	platform_driver_unregister(&qcom_msm8996_cbf_driver);
}
module_exit(qcom_msm8996_cbf_exit);
368
369 MODULE_DESCRIPTION("QCOM MSM8996 CPU Bus Fabric Clock Driver");
370 MODULE_LICENSE("GPL");
371