xref: /linux/drivers/firmware/arm_scmi/clock.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"

/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x30000

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
	CLOCK_CONFIG_GET = 0xB,
	CLOCK_POSSIBLE_PARENTS_GET = 0xC,
	CLOCK_PARENT_SET = 0xD,
	CLOCK_PARENT_GET = 0xE,
	CLOCK_GET_PERMISSIONS = 0xF,
};

#define CLOCK_STATE_CONTROL_ALLOWED	BIT(31)
#define CLOCK_PARENT_CONTROL_ALLOWED	BIT(30)
#define CLOCK_RATE_CONTROL_ALLOWED	BIT(29)

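/*
 * Clock states, as encoded in the CLOCK_CONFIG_SET attributes field
 * (bits[1:0], see REGMASK_CLK_STATE below).
 */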
enum clk_state {
	CLK_STATE_DISABLE,
	CLK_STATE_ENABLE,
	CLK_STATE_RESERVED,
	CLK_STATE_UNCHANGED,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)		((x) & BIT(28))
#define SUPPORTS_EXTENDED_CONFIG(x)		((x) & BIT(27))
#define SUPPORTS_GET_PERMISSIONS(x)		((x) & BIT(1))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	__le32 clock_enable_latency;
};

struct scmi_msg_clock_possible_parents {
	__le32 id;
	__le32 skip_parents;
};

struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x)		((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)	((x) >> 24)
	__le32 possible_parents[];
};

struct scmi_msg_clock_set_parent {
	__le32 id;
	__le32 parent_id;
};

struct scmi_msg_clock_config_set {
	__le32 id;
	__le32 attributes;
};

/* Valid only from SCMI clock v3.0 and onwards */
struct scmi_msg_clock_config_set_v2 {
	__le32 id;
	__le32 attributes;
#define NULL_OEM_TYPE			0
#define REGMASK_OEM_TYPE_SET		GENMASK(23, 16)
#define REGMASK_CLK_STATE		GENMASK(1, 0)
	__le32 oem_config_val;
};

struct scmi_msg_clock_config_get {
	__le32 id;
	__le32 flags;
#define REGMASK_OEM_TYPE_GET		GENMASK(7, 0)
};

struct scmi_msg_resp_clock_config_get {
	__le32 attributes;
	__le32 config;
#define IS_CLK_ENABLED(x)		le32_get_bits((x), BIT(0))
	__le32 oem_config_val;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
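
/*
 * Rates travel as {value_low, value_high} little-endian word pairs and
 * RATE_TO_U64() above reassembles them into a native u64: for instance,
 * value_low = 0x0 with value_high = 0x1 denotes 2^32 = 4294967296 Hz.
 */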

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct scmi_msg_resp_set_rate_complete {
	__le32 id;
	__le32 rate_low;
	__le32 rate_high;
};

struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;
};

struct scmi_clock_rate_notify_payld {
	__le32 agent_id;
	__le32 clock_id;
	__le32 rate_low;
	__le32 rate_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	bool notify_rate_changed_cmd;
	bool notify_rate_change_requested_cmd;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
	int (*clock_config_set)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum clk_state state,
				enum scmi_clock_oem_config oem_type,
				u32 oem_val, bool atomic);
	int (*clock_config_get)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum scmi_clock_oem_config oem_type,
				u32 *attributes, bool *enabled, u32 *oem_val,
				bool atomic);
};

static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};

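/*
 * Validate a clock domain identifier against the enumerated clocks and
 * return a pointer to its cached descriptor, or ERR_PTR(-EINVAL) if the
 * identifier is out of range.
 */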
static inline struct scmi_clock_info *
scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
{
	if (clk_id >= ci->num_clocks)
		return ERR_PTR(-EINVAL);

	return ci->clk + clk_id;
}

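/*
 * Fetch the protocol-wide attributes (number of clocks and async request
 * budget), then probe whether the platform implements the optional
 * rate-change notification commands.
 */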
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);

	if (!ret) {
		if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
			ci->notify_rate_changed_cmd = true;

		if (!ph->hops->protocol_msg_check(ph,
						  CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
						  NULL))
			ci->notify_rate_change_requested_cmd = true;
	}

	return ret;
}

struct scmi_clk_ipriv {
	struct device *dev;
	u32 clk_id;
	struct scmi_clock_info *clk;
};

static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
						      const void *priv)
{
	struct scmi_msg_clock_possible_parents *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of parents to be skipped/already read */
	msg->skip_parents = cpu_to_le32(desc_index);
}

static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
						  const void *response, void *priv)
{
	const struct scmi_msg_resp_clock_possible_parents *r = response;
	struct scmi_clk_ipriv *p = priv;
	struct device *dev = p->dev;
	u32 flags;

	flags = le32_to_cpu(r->num_parent_flags);
	st->num_returned = NUM_PARENTS_RETURNED(flags);
	st->num_remaining = NUM_PARENTS_REMAINING(flags);

	/*
	 * The number of parents is not advertised anywhere in advance, so
	 * assume it is returned + remaining on the first call.
	 */
	if (!st->max_resources) {
		p->clk->num_parents = st->num_returned + st->num_remaining;
		p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
					       sizeof(*p->clk->parents),
					       GFP_KERNEL);
		if (!p->clk->parents) {
			p->clk->num_parents = 0;
			return -ENOMEM;
		}
		st->max_resources = st->num_returned + st->num_remaining;
	}

	return 0;
}

static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
						      const void *response,
						      struct scmi_iterator_state *st,
						      void *priv)
{
	const struct scmi_msg_resp_clock_possible_parents *r = response;
	struct scmi_clk_ipriv *p = priv;

	u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];

	*parent = le32_to_cpu(r->possible_parents[st->loop_idx]);

	return 0;
}

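/*
 * Collect the full list of possible parents of @clk_id, using the common
 * SCMI iterator helpers to walk the multi-part CLOCK_POSSIBLE_PARENTS_GET
 * replies.
 */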
static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
				       struct scmi_clock_info *clk)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_possible_parents_prepare_message,
		.update_state = iter_clk_possible_parents_update_state,
		.process_response = iter_clk_possible_parents_process_response,
	};

	struct scmi_clk_ipriv ppriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, 0,
					    CLOCK_POSSIBLE_PARENTS_GET,
					    sizeof(struct scmi_msg_clock_possible_parents),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	return ph->hops->iter_response_run(iter);
}

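/*
 * Issue CLOCK_GET_PERMISSIONS (clock protocol v3.0) and cache, in negated
 * form, whether this agent is denied state, rate or parent control over
 * @clk_id.
 */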
static int
scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
			   struct scmi_clock_info *clk)
{
	struct scmi_xfer *t;
	u32 perm;
	int ret;

	ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
				      sizeof(clk_id), sizeof(perm), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		perm = get_unaligned_le32(t->rx.buf);

		clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
		clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
		clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}

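/*
 * Retrieve the attributes of a single clock and, depending on the
 * advertised protocol version and feature bits, also its extended name,
 * possible parents and permissions.
 */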
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct clock_info *cinfo,
				     u32 version)
{
	int ret;
	u32 attributes;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;
	struct scmi_clock_info *clk = cinfo->clk + clk_id;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 latency = 0;

		attributes = le32_to_cpu(attr->attributes);
		strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
		/*
		 * clock_enable_latency is present only from clock
		 * protocol v2.0 (SCMI spec v3.1).
		 */
		if (PROTOCOL_REV_MAJOR(version) >= 0x2)
			latency = le32_to_cpu(attr->clock_enable_latency);
		clk->enable_latency = latency ? : U32_MAX;
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported, overwrite the short name with the extended one;
	 * on error just carry on and use the already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
						    NULL, clk->name,
						    SCMI_MAX_STR_SIZE);

		if (cinfo->notify_rate_changed_cmd &&
		    SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (cinfo->notify_rate_change_requested_cmd &&
		    SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
		if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
			if (SUPPORTS_PARENT_CLOCK(attributes))
				scmi_clock_possible_parents(ph, clk_id, clk);
			if (SUPPORTS_GET_PERMISSIONS(attributes))
				scmi_clock_get_permissions(ph, clk_id, clk);
			if (SUPPORTS_EXTENDED_CONFIG(attributes))
				clk->extended_config = true;
		}
	}

	return ret;
}

static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

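/*
 * CLOCK_DESCRIBE_RATES replies carry either a discrete list of rates or
 * a {min, max, step} triplet; the iterator callbacks below handle both
 * layouts, including a known out-of-spec quirk of the triplet form.
 */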
static void iter_clk_describe_prepare_message(void *message,
					      const unsigned int desc_index,
					      const void *priv)
{
	struct scmi_msg_clock_describe_rates *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of rates to be skipped/already read */
	msg->rate_index = cpu_to_le32(desc_index);
}

static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
{
	u32 flags;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	/* Warn about out-of-spec replies ... */
	if (!p->clk->rate_discrete &&
	    (st->num_returned != 3 || st->num_remaining != 0)) {
		dev_warn(p->dev,
			 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
			 p->clk->name, st->num_returned, st->num_remaining,
			 st->rx_len);

		/*
		 * A known quirk: a triplet is returned but num_returned != 3.
		 * Check for a safe payload size and fix it up.
		 */
		if (st->num_returned != 3 && st->num_remaining == 0 &&
		    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
			st->num_returned = 3;
			st->num_remaining = 0;
		} else {
			dev_err(p->dev,
				"Cannot fix out-of-spec reply!\n");
			return -EPROTO;
		}
	}

	return 0;
}

static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
{
	int ret = 0;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		switch (st->desc_index + st->loop_idx) {
		case 0:
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			break;
		case 1:
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			break;
		case 2:
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
	}

	return ret;
}

static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_describe_prepare_message,
		.update_state = iter_clk_describe_update_state,
		.process_response = iter_clk_describe_process_response,
	};
	struct scmi_clk_ipriv cpriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};

	iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
					    CLOCK_DESCRIBE_RATES,
					    sizeof(struct scmi_msg_clock_describe_rates),
					    &cpriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (!clk->rate_discrete) {
		dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
			clk->range.min_rate, clk->range.max_rate,
			clk->range.step_size);
	} else if (clk->list.num_rates) {
		sort(clk->list.rates, clk->list.num_rates,
		     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
	}

	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

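/*
 * Set a new clock rate, going asynchronous (CLOCK_SET_ASYNC) when the
 * platform advertised an async budget and there is still room in it,
 * and falling back to a synchronous transfer otherwise.
 */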
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->rate_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			resp = t->rx.buf;
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum clk_state state,
		      enum scmi_clock_oem_config __unused0, u32 __unused1,
		      bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set *cfg;

	if (state >= CLK_STATE_RESERVED)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(state);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 parent_id)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_set_parent *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (parent_id >= clk->num_parents)
		return -EINVAL;

	if (clk->parent_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = false;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int
scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 *parent_id)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
				      sizeof(__le32), sizeof(u32), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*parent_id = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum clk_state state,
			 enum scmi_clock_oem_config oem_type, u32 oem_val,
			 bool atomic)
{
	int ret;
	u32 attrs;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set_v2 *cfg;

	if (state == CLK_STATE_RESERVED ||
	    (!oem_type && state == CLK_STATE_UNCHANGED))
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
		FIELD_PREP(REGMASK_CLK_STATE, state);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(attrs);
	/* Clear in any case */
	cfg->oem_config_val = cpu_to_le32(0);
	if (oem_type)
		cfg->oem_config_val = cpu_to_le32(oem_val);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
			     bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->state_ctrl_forbidden)
		return -EACCES;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
				    NULL_OEM_TYPE, 0, atomic);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
			      bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->state_ctrl_forbidden)
		return -EACCES;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
				    NULL_OEM_TYPE, 0, atomic);
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum scmi_clock_oem_config oem_type, u32 *attributes,
			 bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_get *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->flags = cpu_to_le32(flags);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;

		if (attributes)
			*attributes = le32_to_cpu(resp->attributes);

		if (enabled)
			*enabled = IS_CLK_ENABLED(resp->config);

		if (oem_val && oem_type)
			*oem_val = le32_to_cpu(resp->oem_config_val);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}

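/*
 * Pre-v3.0 platforms lack CLOCK_CONFIG_GET, so derive the enabled state
 * from the CLOCK_ATTRIBUTES reply instead; OEM config values and
 * extended attributes are not available on this path.
 */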
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum scmi_clock_oem_config oem_type, u32 *attributes,
		      bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *resp;

	if (!enabled)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*resp), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;
	put_unaligned_le32(clk_id, t->tx.buf);
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*enabled = IS_CLK_ENABLED(resp->attributes);

	ph->xops->xfer_put(ph, t);

	return ret;
}

static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
				u32 clk_id, bool *enabled, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
				    enabled, NULL, atomic);
}

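/*
 * OEM-specific configs are reachable only if the clock advertised
 * extended config support; the state is passed as CLK_STATE_UNCHANGED
 * so that only the OEM type/value is modified.
 */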
static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
				     u32 clk_id,
				     enum scmi_clock_oem_config oem_type,
				     u32 oem_val, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!clk->extended_config)
		return -EOPNOTSUPP;

	return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
				    oem_type, oem_val, atomic);
}

static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id,
				     enum scmi_clock_oem_config oem_type,
				     u32 *oem_val, u32 *attributes, bool atomic)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!clk->extended_config)
		return -EOPNOTSUPP;

	return ci->clock_config_get(ph, clk_id, oem_type, attributes,
				    NULL, oem_val, atomic);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct scmi_clock_info *clk;
	struct clock_info *ci = ph->get_priv(ph);

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return NULL;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.state_get = scmi_clock_state_get,
	.config_oem_get = scmi_clock_config_oem_get,
	.config_oem_set = scmi_clock_config_oem_set,
	.parent_set = scmi_clock_set_parent,
	.parent_get = scmi_clock_get_parent,
};
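
/*
 * A minimal usage sketch, assuming a caller has already obtained this
 * protocol's handle as @ph and these ops as @clk_ops (the surrounding
 * names are illustrative only, not defined in this file):
 *
 *	u64 rate;
 *
 *	if (!clk_ops->rate_get(ph, clk_id, &rate))
 *		dev_dbg(dev, "clk %u @ %llu Hz\n", clk_id, rate);
 *
 *	clk_ops->rate_set(ph, clk_id, rate * 2);
 */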

static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
				      u8 evt_id, u32 src_id)
{
	bool supported;
	struct scmi_clock_info *clk;
	struct clock_info *ci = ph->get_priv(ph);

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return false;

	clk = scmi_clock_domain_lookup(ci, src_id);
	if (IS_ERR(clk))
		return false;

	if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
		supported = clk->rate_changed_notifications;
	else
		supported = clk->rate_change_requested_notifications;

	return supported;
}

static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
				u32 clk_id, int message_id, bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_rate_notify *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->clk_id = cpu_to_le32(clk_id);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
				       u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

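/*
 * Translate a raw rate notification payload into the report delivered
 * to registered notifiers, rejecting payloads of unexpected size or
 * with an unknown event id.
 */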
static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
					 u8 evt_id, ktime_t timestamp,
					 const void *payld, size_t payld_sz,
					 void *report, u32 *src_id)
{
	const struct scmi_clock_rate_notify_payld *p = payld;
	struct scmi_clock_rate_notif_report *r = report;

	if (sizeof(*p) != payld_sz ||
	    (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
	     evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
		return NULL;

	r->timestamp = timestamp;
	r->agent_id = le32_to_cpu(p->agent_id);
	r->clock_id = le32_to_cpu(p->clock_id);
	r->rate = get_unaligned_le64(&p->rate_low);
	*src_id = r->clock_id;

	return r;
}

static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	if (!ci)
		return -EINVAL;

	return ci->num_clocks;
}

static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};

static const struct scmi_event_ops clk_event_ops = {
	.is_notify_supported = scmi_clk_notify_supported,
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};

static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};

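/*
 * Protocol instance setup: query the version, read the protocol
 * attributes, enumerate and describe every clock, then select the v3.0+
 * or legacy CONFIG_SET/GET implementations before stashing the result
 * as protocol private data.
 */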
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, cinfo, version);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
		cinfo->clock_config_set = scmi_clock_config_set_v2;
		cinfo->clock_config_get = scmi_clock_config_get_v2;
	} else {
		cinfo->clock_config_set = scmi_clock_config_set;
		cinfo->clock_config_get = scmi_clock_config_get;
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo, version);
}

static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)