xref: /linux/drivers/firmware/arm_scmi/clock.c (revision 856e7c4b619af622d56b3b454f7bec32a170ac99)
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_DELAYED	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	int num_clocks;
	int max_async_req;
	struct scmi_clock_info *clk;
};

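/*
 * Retrieve the protocol attributes: the number of clocks exposed by the
 * platform and the maximum number of asynchronous rate-set requests it
 * supports.
 */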
static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

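/*
 * Query the attributes of a single clock; only the clock name is retained,
 * and it is cleared on failure so that the clock is later treated as
 * unavailable by scmi_clock_info_get().
 */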
static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}

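/*
 * Enumerate the rates supported by a clock. Discrete rates are read in
 * batches until the platform reports none remaining; a range-based clock
 * instead returns a {min, max, step} triplet in a single response.
 */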
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete)
		clk->list.num_rates = tot_rate_cnt;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

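/* Read the current rate of a clock, returned as a 64-bit value in Hz. */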
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		__le32 *pval = t->rx.buf;

		*value = le32_to_cpu(*pval);
		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

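/*
 * Request a new clock rate. @config carries the CLOCK_SET_* flags
 * (asynchronous completion, delayed response, rounding policy); the
 * 64-bit rate is split into low/high 32-bit words for the message.
 */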
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u32 config, u64 rate)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(config);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

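/*
 * Update a clock's configuration; only the CLOCK_ENABLE attribute bit is
 * used by the callers below, so this effectively gates the clock on or off.
 */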
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

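/* Enable/disable helpers built on top of scmi_clock_config_set(). */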
static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

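/* Return the number of clocks reported by the platform. */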
static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

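/*
 * Look up the cached information for a clock; clocks whose attributes
 * could not be read (empty name) are reported as unavailable.
 */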
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

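/* Clock protocol operations exposed to users via handle->clk_ops */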
static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

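/*
 * Initialise the clock protocol: read the protocol version and attributes,
 * then cache the attributes and supported rates of every clock before
 * publishing the operations on the SCMI handle.
 */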
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

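/* Register the clock protocol with the SCMI core at boot. */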
static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);