xref: /linux/drivers/clk/imx/clk-divider-gate.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP.
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

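/*
 * struct clk_divider_gate - divider clock whose zero divider value also gates it
 * @divider:	the common clk_divider this clock builds on
 * @cached_val:	divider field value saved while the clock is gated, since
 *		clearing the field both disables the clock and loses the
 *		configured divider
 */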
struct clk_divider_gate {
	struct clk_divider divider;
	u32 cached_val;
};

static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_divider_gate, divider);
}

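/*
 * Read-only variant: the divider value is always read back from the
 * register; a zero field means the clock is gated, so report a rate of 0.
 */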
static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
						     unsigned long parent_rate)
{
	struct clk_divider *div = to_clk_divider(hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

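/*
 * While the clock is disabled the register field reads zero, so the rate
 * is recalculated from the cached divider value instead of the hardware.
 */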
static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(div->lock, flags);

	if (!clk_hw_is_enabled(hw)) {
		val = div_gate->cached_val;
	} else {
		val = readl(div->reg) >> div->shift;
		val &= clk_div_mask(div->width);
	}

	spin_unlock_irqrestore(div->lock, flags);

	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

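/* Rate determination is simply delegated to the common divider ops. */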
static int clk_divider_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}

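/*
 * Program the divider directly while the clock is enabled; otherwise only
 * cache the value so it can be written back on the next enable.
 */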
static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, div->table,
				div->width, div->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(div->lock, flags);

	if (clk_hw_is_enabled(hw)) {
		val = readl(div->reg);
		val &= ~(clk_div_mask(div->width) << div->shift);
		val |= (u32)value << div->shift;
		writel(val, div->reg);
	} else {
		div_gate->cached_val = value;
	}

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

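/* Enable the clock by restoring the cached (non-zero) divider value. */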
static int clk_divider_enable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	if (!div_gate->cached_val) {
		pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	spin_lock_irqsave(div->lock, flags);
	/* restore div val */
	val = readl(div->reg);
	val |= div_gate->cached_val << div->shift;
	writel(val, div->reg);

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

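/* Gate the clock by clearing the register, after caching the divider field. */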
static void clk_divider_disable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(div->lock, flags);

	/* store the current div val */
	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	div_gate->cached_val = val;
	writel(0, div->reg);

	spin_unlock_irqrestore(div->lock, flags);
}

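/* A zero divider field means the clock is gated. */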
static int clk_divider_is_enabled(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);
	u32 val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);

	return val ? 1 : 0;
}

static const struct clk_ops clk_divider_gate_ro_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate_ro,
	.determine_rate = clk_divider_determine_rate,
};

static const struct clk_ops clk_divider_gate_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate,
	.determine_rate = clk_divider_determine_rate,
	.set_rate = clk_divider_gate_set_rate,
	.enable = clk_divider_enable,
	.disable = clk_divider_disable,
	.is_enabled = clk_divider_is_enabled,
};

/*
 * NOTE: To reuse as much code as possible from the common divider, this
 * divider also takes an extra clk_divider_flags argument. However,
 * CLK_DIVIDER_ONE_BASED is always set by default to match the hardware.
 * Beyond that, only the CLK_DIVIDER_READ_ONLY flag is supported and may
 * be specified freely by the user.
 */
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
				    unsigned long flags, void __iomem *reg,
				    u8 shift, u8 width, u8 clk_divider_flags,
				    const struct clk_div_table *table,
				    spinlock_t *lock)
{
	struct clk_init_data init;
	struct clk_divider_gate *div_gate;
	struct clk_hw *hw;
	u32 val;
	int ret;

	div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
	if (!div_gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_gate_ro_ops;
	else
		init.ops = &clk_divider_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	div_gate->divider.reg = reg;
	div_gate->divider.shift = shift;
	div_gate->divider.width = width;
	div_gate->divider.lock = lock;
	div_gate->divider.table = table;
	div_gate->divider.hw.init = &init;
	div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
	/* cache gate status */
	val = readl(reg) >> shift;
	val &= clk_div_mask(width);
	div_gate->cached_val = val;

	hw = &div_gate->divider.hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div_gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
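
/*
 * Illustrative usage sketch (not part of this driver): how a clock
 * provider might register a gated divider with this helper. The clock
 * names, register offset and field layout below are hypothetical and
 * only show the parameter order of imx_clk_hw_divider_gate().
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_register_divider_gate(void __iomem *base)
{
	struct clk_hw *hw;

	/* hypothetical divider field in bits [5:0] at offset 0x10 */
	hw = imx_clk_hw_divider_gate("example_div", "osc_24m", 0,
				     base + 0x10, 0, 6, 0, NULL,
				     &example_lock);
	if (IS_ERR(hw))
		pr_warn("%s: failed to register example_div: %ld\n",
			__func__, PTR_ERR(hw));
}
#endif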