xref: /linux/drivers/clk/qcom/clk-cbf-8996.c (revision 12dc71953e664f084918d3e1b63b211b0e6f8e98)
1ca574a5dSDmitry Baryshkov // SPDX-License-Identifier: GPL-2.0
2ca574a5dSDmitry Baryshkov /*
3ca574a5dSDmitry Baryshkov  * Copyright (c) 2022, 2023 Linaro Ltd.
4ca574a5dSDmitry Baryshkov  */
5ca574a5dSDmitry Baryshkov #include <linux/bitfield.h>
6ca574a5dSDmitry Baryshkov #include <linux/clk.h>
7ca574a5dSDmitry Baryshkov #include <linux/clk-provider.h>
8*12dc7195SDmitry Baryshkov #include <linux/interconnect-clk.h>
9*12dc7195SDmitry Baryshkov #include <linux/interconnect-provider.h>
10ca574a5dSDmitry Baryshkov #include <linux/of.h>
11ca574a5dSDmitry Baryshkov #include <linux/module.h>
12ca574a5dSDmitry Baryshkov #include <linux/platform_device.h>
13ca574a5dSDmitry Baryshkov #include <linux/regmap.h>
14ca574a5dSDmitry Baryshkov 
15*12dc7195SDmitry Baryshkov #include <dt-bindings/interconnect/qcom,msm8996-cbf.h>
16*12dc7195SDmitry Baryshkov 
17ca574a5dSDmitry Baryshkov #include "clk-alpha-pll.h"
18ca574a5dSDmitry Baryshkov #include "clk-regmap.h"
19ca574a5dSDmitry Baryshkov 
/*
 * Indices of the input clocks as listed in the DT binding;
 * the order here must match the order of clocks in the binding.
 */
enum {
	DT_XO,		/* board XO reference */
	DT_APCS_AUX,	/* APCS auxiliary (safe/backup) clock */
};
25ca574a5dSDmitry Baryshkov 
/* Parent positions of the CBF mux; must match cbf_mux_parent_data[] order */
enum {
	CBF_XO_INDEX,		/* XO reference */
	CBF_PLL_INDEX,		/* CBF PLL direct output */
	CBF_DIV_INDEX,		/* CBF PLL post-divider (PLL/2) */
	CBF_APCS_AUX_INDEX,	/* APCS auxiliary clock */
};
32ca574a5dSDmitry Baryshkov 
/* Below this rate the mux uses the PLL/2 tap instead of the PLL directly */
#define DIV_THRESHOLD		600000000

/* CBF mux register: parent select, auto-clock-select control bits */
#define CBF_MUX_OFFSET		0x18
#define CBF_MUX_PARENT_MASK		GENMASK(1, 0)
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
	FIELD_PREP(CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
#define CBF_MUX_AUTO_CLK_SEL_BIT	BIT(6)

/* Offset of the CBF PLL register block inside the mapped MMIO region */
#define CBF_PLL_OFFSET 0xf000
43ca574a5dSDmitry Baryshkov 
/* Register layout of the CBF alpha PLL, relative to CBF_PLL_OFFSET */
static const u8 cbf_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x08,
	[PLL_OFF_ALPHA_VAL] = 0x10,
	[PLL_OFF_USER_CTL] = 0x18,
	[PLL_OFF_CONFIG_CTL] = 0x20,
	[PLL_OFF_CONFIG_CTL_U] = 0x24,
	[PLL_OFF_TEST_CTL] = 0x30,
	[PLL_OFF_TEST_CTL_U] = 0x34,
	[PLL_OFF_STATUS] = 0x28,
};
54ca574a5dSDmitry Baryshkov 
/*
 * Initial CBF PLL configuration applied at probe time.
 * .l is the integer multiplier of the reference clock; control/test
 * register values are hardware-specific magic from the vendor setup.
 */
static const struct alpha_pll_config cbfpll_config = {
	.l = 72,
	.config_ctl_val = 0x200d4828,
	.config_ctl_hi_val = 0x006,
	.test_ctl_val = 0x1c000000,
	.test_ctl_hi_val = 0x00004000,
	.pre_div_mask = BIT(12),
	/* post divider field: set to divide-by-2 (feeds the PLL/2 tap) */
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};
67ca574a5dSDmitry Baryshkov 
/* The CBF PLL itself, clocked from the XO reference, FSM-controlled */
static struct clk_alpha_pll cbf_pll = {
	.offset = CBF_PLL_OFFSET,
	.regs = cbf_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "cbf_pll",
		.parent_data = (const struct clk_parent_data[]) {
			{ .index = DT_XO, },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};
81ca574a5dSDmitry Baryshkov 
/*
 * Fixed /2 tap of the CBF PLL, used as the mux input for rates below
 * DIV_THRESHOLD. CLK_SET_RATE_PARENT forwards rate requests to the PLL.
 */
static struct clk_fixed_factor cbf_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "cbf_pll_postdiv",
		.parent_hws = (const struct clk_hw*[]){
			&cbf_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};
95ca574a5dSDmitry Baryshkov 
/* Mux parents; order must match the CBF_*_INDEX enum above */
static const struct clk_parent_data cbf_mux_parent_data[] = {
	{ .index = DT_XO },
	{ .hw = &cbf_pll.clkr.hw },
	{ .hw = &cbf_pll_postdiv.hw },
	{ .index = DT_APCS_AUX },
};
102ca574a5dSDmitry Baryshkov 
/* CBF mux clock: regmap-backed mux plus a rate-change notifier */
struct clk_cbf_8996_mux {
	u32 reg;			/* mux register offset in the regmap */
	struct notifier_block nb;	/* pre/abort rate-change hook */
	struct clk_regmap clkr;		/* must be last: container_of anchor */
};
108ca574a5dSDmitry Baryshkov 
/* Map the embedded clk_regmap back to its containing clk_cbf_8996_mux */
static struct clk_cbf_8996_mux *to_clk_cbf_8996_mux(struct clk_regmap *clkr)
{
	return container_of(clkr, struct clk_cbf_8996_mux, clkr);
}

/* Defined below; needed by the cbf_mux static initializer */
static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data);
116ca574a5dSDmitry Baryshkov 
117ca574a5dSDmitry Baryshkov static u8 clk_cbf_8996_mux_get_parent(struct clk_hw *hw)
118ca574a5dSDmitry Baryshkov {
119ca574a5dSDmitry Baryshkov 	struct clk_regmap *clkr = to_clk_regmap(hw);
120ca574a5dSDmitry Baryshkov 	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
121ca574a5dSDmitry Baryshkov 	u32 val;
122ca574a5dSDmitry Baryshkov 
123ca574a5dSDmitry Baryshkov 	regmap_read(clkr->regmap, mux->reg, &val);
124ca574a5dSDmitry Baryshkov 
125ca574a5dSDmitry Baryshkov 	return FIELD_GET(CBF_MUX_PARENT_MASK, val);
126ca574a5dSDmitry Baryshkov }
127ca574a5dSDmitry Baryshkov 
128ca574a5dSDmitry Baryshkov static int clk_cbf_8996_mux_set_parent(struct clk_hw *hw, u8 index)
129ca574a5dSDmitry Baryshkov {
130ca574a5dSDmitry Baryshkov 	struct clk_regmap *clkr = to_clk_regmap(hw);
131ca574a5dSDmitry Baryshkov 	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
132ca574a5dSDmitry Baryshkov 	u32 val;
133ca574a5dSDmitry Baryshkov 
134ca574a5dSDmitry Baryshkov 	val = FIELD_PREP(CBF_MUX_PARENT_MASK, index);
135ca574a5dSDmitry Baryshkov 
136ca574a5dSDmitry Baryshkov 	return regmap_update_bits(clkr->regmap, mux->reg, CBF_MUX_PARENT_MASK, val);
137ca574a5dSDmitry Baryshkov }
138ca574a5dSDmitry Baryshkov 
139ca574a5dSDmitry Baryshkov static int clk_cbf_8996_mux_determine_rate(struct clk_hw *hw,
140ca574a5dSDmitry Baryshkov 					   struct clk_rate_request *req)
141ca574a5dSDmitry Baryshkov {
142ca574a5dSDmitry Baryshkov 	struct clk_hw *parent;
143ca574a5dSDmitry Baryshkov 
144ca574a5dSDmitry Baryshkov 	if (req->rate < (DIV_THRESHOLD / 2))
145ca574a5dSDmitry Baryshkov 		return -EINVAL;
146ca574a5dSDmitry Baryshkov 
147ca574a5dSDmitry Baryshkov 	if (req->rate < DIV_THRESHOLD)
148ca574a5dSDmitry Baryshkov 		parent = clk_hw_get_parent_by_index(hw, CBF_DIV_INDEX);
149ca574a5dSDmitry Baryshkov 	else
150ca574a5dSDmitry Baryshkov 		parent = clk_hw_get_parent_by_index(hw, CBF_PLL_INDEX);
151ca574a5dSDmitry Baryshkov 
152ca574a5dSDmitry Baryshkov 	if (!parent)
153ca574a5dSDmitry Baryshkov 		return -EINVAL;
154ca574a5dSDmitry Baryshkov 
155ca574a5dSDmitry Baryshkov 	req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
156ca574a5dSDmitry Baryshkov 	req->best_parent_hw = parent;
157ca574a5dSDmitry Baryshkov 
158ca574a5dSDmitry Baryshkov 	return 0;
159ca574a5dSDmitry Baryshkov }
160ca574a5dSDmitry Baryshkov 
/* clk_ops for the CBF mux: reparenting only, rate comes from the parent */
static const struct clk_ops clk_cbf_8996_mux_ops = {
	.set_parent = clk_cbf_8996_mux_set_parent,
	.get_parent = clk_cbf_8996_mux_get_parent,
	.determine_rate = clk_cbf_8996_mux_determine_rate,
};
166ca574a5dSDmitry Baryshkov 
/* The CBF output mux; this is the clock handed out to consumers */
static struct clk_cbf_8996_mux cbf_mux = {
	.reg = CBF_MUX_OFFSET,
	.nb.notifier_call = cbf_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "cbf_mux",
		.parent_data = cbf_mux_parent_data,
		.num_parents = ARRAY_SIZE(cbf_mux_parent_data),
		.ops = &clk_cbf_8996_mux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};
179ca574a5dSDmitry Baryshkov 
180ca574a5dSDmitry Baryshkov static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
181ca574a5dSDmitry Baryshkov 			       void *data)
182ca574a5dSDmitry Baryshkov {
183ca574a5dSDmitry Baryshkov 	struct clk_notifier_data *cnd = data;
184ca574a5dSDmitry Baryshkov 
185ca574a5dSDmitry Baryshkov 	switch (event) {
186ca574a5dSDmitry Baryshkov 	case PRE_RATE_CHANGE:
187ca574a5dSDmitry Baryshkov 		/*
188ca574a5dSDmitry Baryshkov 		 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
189ca574a5dSDmitry Baryshkov 		 * to bottom, so it will change the rate of the PLL before
190ca574a5dSDmitry Baryshkov 		 * chaging the parent of PMUX. This can result in pmux getting
191ca574a5dSDmitry Baryshkov 		 * clocked twice the expected rate.
192ca574a5dSDmitry Baryshkov 		 *
193ca574a5dSDmitry Baryshkov 		 * Manually switch to PLL/2 here.
194ca574a5dSDmitry Baryshkov 		 */
195ca574a5dSDmitry Baryshkov 		if (cnd->old_rate > DIV_THRESHOLD &&
196ca574a5dSDmitry Baryshkov 		    cnd->new_rate < DIV_THRESHOLD)
197ca574a5dSDmitry Baryshkov 			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_DIV_INDEX);
198ca574a5dSDmitry Baryshkov 		break;
199ca574a5dSDmitry Baryshkov 	case ABORT_RATE_CHANGE:
200ca574a5dSDmitry Baryshkov 		/* Revert manual change */
201ca574a5dSDmitry Baryshkov 		if (cnd->new_rate < DIV_THRESHOLD &&
202ca574a5dSDmitry Baryshkov 		    cnd->old_rate > DIV_THRESHOLD)
203ca574a5dSDmitry Baryshkov 			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_PLL_INDEX);
204ca574a5dSDmitry Baryshkov 		break;
205ca574a5dSDmitry Baryshkov 	default:
206ca574a5dSDmitry Baryshkov 		break;
207ca574a5dSDmitry Baryshkov 	}
208ca574a5dSDmitry Baryshkov 
209ca574a5dSDmitry Baryshkov 	return notifier_from_errno(0);
210ca574a5dSDmitry Baryshkov };
211ca574a5dSDmitry Baryshkov 
/* Plain (non-regmap) clk_hw clocks to register at probe */
static struct clk_hw *cbf_msm8996_hw_clks[] = {
	&cbf_pll_postdiv.hw,
};
215ca574a5dSDmitry Baryshkov 
/* Regmap-backed clocks to register at probe */
static struct clk_regmap *cbf_msm8996_clks[] = {
	&cbf_pll.clkr,
	&cbf_mux.clkr,
};
220ca574a5dSDmitry Baryshkov 
/* MMIO regmap over the CBF register space; fast_io => spinlock, no sleep */
static const struct regmap_config cbf_msm8996_regmap_config = {
	.reg_bits		= 32,
	.reg_stride		= 4,
	.val_bits		= 32,
	.max_register		= 0x10000,
	.fast_io		= true,
	.val_format_endian	= REGMAP_ENDIAN_LITTLE,
};
229ca574a5dSDmitry Baryshkov 
230*12dc7195SDmitry Baryshkov #ifdef CONFIG_INTERCONNECT
231*12dc7195SDmitry Baryshkov 
232*12dc7195SDmitry Baryshkov /* Random ID that doesn't clash with main qnoc and OSM */
233*12dc7195SDmitry Baryshkov #define CBF_MASTER_NODE 2000
234*12dc7195SDmitry Baryshkov 
235*12dc7195SDmitry Baryshkov static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
236*12dc7195SDmitry Baryshkov {
237*12dc7195SDmitry Baryshkov 	struct device *dev = &pdev->dev;
238*12dc7195SDmitry Baryshkov 	struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
239*12dc7195SDmitry Baryshkov 	const struct icc_clk_data data[] = {
240*12dc7195SDmitry Baryshkov 		{ .clk = clk, .name = "cbf", },
241*12dc7195SDmitry Baryshkov 	};
242*12dc7195SDmitry Baryshkov 	struct icc_provider *provider;
243*12dc7195SDmitry Baryshkov 
244*12dc7195SDmitry Baryshkov 	provider = icc_clk_register(dev, CBF_MASTER_NODE, ARRAY_SIZE(data), data);
245*12dc7195SDmitry Baryshkov 	if (IS_ERR(provider))
246*12dc7195SDmitry Baryshkov 		return PTR_ERR(provider);
247*12dc7195SDmitry Baryshkov 
248*12dc7195SDmitry Baryshkov 	platform_set_drvdata(pdev, provider);
249*12dc7195SDmitry Baryshkov 
250*12dc7195SDmitry Baryshkov 	return 0;
251*12dc7195SDmitry Baryshkov }
252*12dc7195SDmitry Baryshkov 
253*12dc7195SDmitry Baryshkov static int qcom_msm8996_cbf_icc_remove(struct platform_device *pdev)
254*12dc7195SDmitry Baryshkov {
255*12dc7195SDmitry Baryshkov 	struct icc_provider *provider = platform_get_drvdata(pdev);
256*12dc7195SDmitry Baryshkov 
257*12dc7195SDmitry Baryshkov 	icc_clk_unregister(provider);
258*12dc7195SDmitry Baryshkov 
259*12dc7195SDmitry Baryshkov 	return 0;
260*12dc7195SDmitry Baryshkov }
261*12dc7195SDmitry Baryshkov #define qcom_msm8996_cbf_icc_sync_state icc_sync_state
262*12dc7195SDmitry Baryshkov #else
263*12dc7195SDmitry Baryshkov static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev,  struct clk_hw *cbf_hw)
264*12dc7195SDmitry Baryshkov {
265*12dc7195SDmitry Baryshkov 	dev_warn(&pdev->dev, "CONFIG_INTERCONNECT is disabled, CBF clock is fixed\n");
266*12dc7195SDmitry Baryshkov 
267*12dc7195SDmitry Baryshkov 	return 0;
268*12dc7195SDmitry Baryshkov }
269*12dc7195SDmitry Baryshkov #define qcom_msm8996_cbf_icc_remove(pdev) (0)
270*12dc7195SDmitry Baryshkov #define qcom_msm8996_cbf_icc_sync_state NULL
271*12dc7195SDmitry Baryshkov #endif
272*12dc7195SDmitry Baryshkov 
/*
 * Probe: map the CBF register block, park the CBF on the safe GPLL0-derived
 * source, configure and lock the CBF PLL, then switch the CBF over to it
 * and register the clocks, clk provider and interconnect provider.
 *
 * NOTE: the register writes and udelay()s below follow a strict ordering
 * required by the hardware; do not reorder them.
 */
static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *regmap;
	struct device *dev = &pdev->dev;
	int i, ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &cbf_msm8996_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Select GPLL0 for 300MHz for the CBF clock */
	regmap_write(regmap, CBF_MUX_OFFSET, 0x3);

	/* Ensure write goes through before PLLs are reconfigured */
	udelay(5);

	/* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);

	clk_alpha_pll_configure(&cbf_pll, regmap, &cbfpll_config);

	/* Wait for PLL(s) to lock */
	udelay(50);

	/* Enable auto clock selection for CBF */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_BIT,
			   CBF_MUX_AUTO_CLK_SEL_BIT);

	/* Ensure write goes through before muxes are switched */
	udelay(5);

	/* Switch CBF to use the primary PLL */
	regmap_update_bits(regmap, CBF_MUX_OFFSET, CBF_MUX_PARENT_MASK, 0x1);

	/* Register the fixed-factor post-divider first: the mux refers to it */
	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_hw_clks); i++) {
		ret = devm_clk_hw_register(dev, cbf_msm8996_hw_clks[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_clks); i++) {
		ret = devm_clk_register_regmap(dev, cbf_msm8996_clks[i]);
		if (ret)
			return ret;
	}

	/* Hook the overvolting guard onto the mux clock */
	ret = devm_clk_notifier_register(dev, cbf_mux.clkr.hw.clk, &cbf_mux.nb);
	if (ret)
		return ret;

	/* The mux is the single clock exposed to DT consumers */
	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
	if (ret)
		return ret;

	return qcom_msm8996_cbf_icc_register(pdev, &cbf_mux.clkr.hw);
}
337*12dc7195SDmitry Baryshkov 
/* Remove: only the interconnect provider needs explicit teardown;
 * clocks and regmap are devm-managed.
 */
static int qcom_msm8996_cbf_remove(struct platform_device *pdev)
{
	return qcom_msm8996_cbf_icc_remove(pdev);
}
342ca574a5dSDmitry Baryshkov 
/* DT compatible strings handled by this driver */
static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
	{ .compatible = "qcom,msm8996-cbf" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
348ca574a5dSDmitry Baryshkov 
static struct platform_driver qcom_msm8996_cbf_driver = {
	.probe = qcom_msm8996_cbf_probe,
	.remove = qcom_msm8996_cbf_remove,
	.driver = {
		.name = "qcom-msm8996-cbf",
		.of_match_table = qcom_msm8996_cbf_match_table,
		/* icc_sync_state when CONFIG_INTERCONNECT, NULL otherwise */
		.sync_state = qcom_msm8996_cbf_icc_sync_state,
	},
};
358ca574a5dSDmitry Baryshkov 
/* Register early enough to fix the clock to be used for other cores */
static int __init qcom_msm8996_cbf_init(void)
{
	return platform_driver_register(&qcom_msm8996_cbf_driver);
}
postcore_initcall(qcom_msm8996_cbf_init);
365ca574a5dSDmitry Baryshkov 
/* Module unload: unregister the platform driver */
static void __exit qcom_msm8996_cbf_exit(void)
{
	platform_driver_unregister(&qcom_msm8996_cbf_driver);
}
module_exit(qcom_msm8996_cbf_exit);
371ca574a5dSDmitry Baryshkov 
372ca574a5dSDmitry Baryshkov MODULE_DESCRIPTION("QCOM MSM8996 CPU Bus Fabric Clock Driver");
373ca574a5dSDmitry Baryshkov MODULE_LICENSE("GPL");
374