xref: /linux/drivers/firmware/arm_scmi/clock.c (revision 2672031b20f6681514bef14ddcfe8c62c2757d11)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Clock Protocol
4  *
5  * Copyright (C) 2018-2022 ARM Ltd.
6  */
7 
8 #include <linux/module.h>
9 #include <linux/limits.h>
10 #include <linux/sort.h>
11 
12 #include "protocols.h"
13 #include "notify.h"
14 
15 /* Updated only after ALL the mandatory features for that version are merged */
16 #define SCMI_PROTOCOL_SUPPORTED_VERSION		0x20001
17 
/* SCMI clock protocol command (message) IDs. */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
	CLOCK_CONFIG_GET = 0xB,
	CLOCK_POSSIBLE_PARENTS_GET = 0xC,
	CLOCK_PARENT_SET = 0xD,
	CLOCK_PARENT_GET = 0xE,
};

/* Clock states conveyed in the CLOCK_CONFIG_SET attributes field. */
enum clk_state {
	CLK_STATE_DISABLE,
	CLK_STATE_ENABLE,
	CLK_STATE_RESERVED,	/* never a valid request */
	CLK_STATE_UNCHANGED,	/* OEM-config-only update; v2.1 layout only */
};
39 
/* Response payload for PROTOCOL_ATTRIBUTES. */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;	/* max concurrent async CLOCK_RATE_SET requests */
	u8 reserved;
};

/* Response payload for CLOCK_ATTRIBUTES (one clock). */
struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)		((x) & BIT(28))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	__le32 clock_enable_latency;	/* present only on SCMI v3.1+ platforms */
};

/* Request payload for CLOCK_POSSIBLE_PARENTS_GET. */
struct scmi_msg_clock_possible_parents {
	__le32 id;
	__le32 skip_parents;	/* number of parent entries already read */
};

/* Response payload for CLOCK_POSSIBLE_PARENTS_GET. */
struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x)		((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)	((x) >> 24)
	__le32 possible_parents[];
};

/* Request payload for CLOCK_PARENT_SET. */
struct scmi_msg_clock_set_parent {
	__le32 id;
	__le32 parent_id;
};

/* Request payload for CLOCK_CONFIG_SET (pre-v2.1 layout). */
struct scmi_msg_clock_config_set {
	__le32 id;
	__le32 attributes;
};
77 
/* Request payload for CLOCK_CONFIG_SET. Valid only from SCMI clock v2.1 */
struct scmi_msg_clock_config_set_v2 {
	__le32 id;
	__le32 attributes;
#define NULL_OEM_TYPE			0
#define REGMASK_OEM_TYPE_SET		GENMASK(23, 16)
#define REGMASK_CLK_STATE		GENMASK(1, 0)
	__le32 oem_config_val;	/* meaningful only when an OEM type is set */
};

/* Request payload for CLOCK_CONFIG_GET (clock v2.1 onwards). */
struct scmi_msg_clock_config_get {
	__le32 id;
	__le32 flags;
#define REGMASK_OEM_TYPE_GET		GENMASK(7, 0)
};

/* Response payload for CLOCK_CONFIG_GET. */
struct scmi_msg_resp_clock_config_get {
	__le32 attributes;
	__le32 config;
#define IS_CLK_ENABLED(x)		le32_get_bits((x), BIT(0))
	__le32 oem_config_val;
};

/* Request payload for CLOCK_DESCRIBE_RATES. */
struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;	/* number of rate entries already read */
};
105 
/*
 * Response payload for CLOCK_DESCRIBE_RATES: either a list of discrete
 * rates or, when RATE_DISCRETE() is false, a {min, max, step} triplet.
 */
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
/* Assemble one 64-bit rate from its two little-endian 32-bit halves. */
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

/* Request payload for CLOCK_RATE_SET. */
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)	/* completion via delayed response */
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

/* Delayed-response payload of an asynchronous CLOCK_RATE_SET. */
struct scmi_msg_resp_set_rate_complete {
	__le32 id;
	__le32 rate_low;
	__le32 rate_high;
};

/* Request payload for the CLOCK_RATE_*NOTIFY commands. */
struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;
};

/* Notification payload for rate-changed/rate-change-requested events. */
struct scmi_clock_rate_notify_payld {
	__le32 agent_id;
	__le32 clock_id;
	__le32 rate_low;
	__le32 rate_high;
};
150 
/* Protocol-private instance data, stashed via ph->set_priv() at init. */
struct clock_info {
	u32 version;			/* negotiated clock protocol version */
	int num_clocks;
	int max_async_req;		/* 0 means async rate-set unsupported */
	atomic_t cur_async_req;		/* in-flight async rate-set requests */
	struct scmi_clock_info *clk;	/* array of num_clocks entries */
	/* Version-dependent CONFIG_SET/CONFIG_GET backends, picked at init. */
	int (*clock_config_set)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum clk_state state,
				u8 oem_type, u32 oem_val, bool atomic);
	int (*clock_config_get)(const struct scmi_protocol_handle *ph,
				u32 clk_id, u8 oem_type, u32 *attributes,
				bool *enabled, u32 *oem_val, bool atomic);
};

/* Map generic SCMI_EVENT_CLOCK_* IDs onto protocol notify commands. */
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
169 
/*
 * Query protocol-level attributes: the number of exposed clocks and the
 * maximum number of concurrent asynchronous rate-set requests allowed.
 */
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}
194 
/* Private context threaded through the response-iterator callbacks below. */
struct scmi_clk_ipriv {
	struct device *dev;
	u32 clk_id;
	struct scmi_clock_info *clk;	/* clock being populated */
};
200 
201 static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
202 						      const void *priv)
203 {
204 	struct scmi_msg_clock_possible_parents *msg = message;
205 	const struct scmi_clk_ipriv *p = priv;
206 
207 	msg->id = cpu_to_le32(p->clk_id);
208 	/* Set the number of OPPs to be skipped/already read */
209 	msg->skip_parents = cpu_to_le32(desc_index);
210 }
211 
212 static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
213 						  const void *response, void *priv)
214 {
215 	const struct scmi_msg_resp_clock_possible_parents *r = response;
216 	struct scmi_clk_ipriv *p = priv;
217 	struct device *dev = ((struct scmi_clk_ipriv *)p)->dev;
218 	u32 flags;
219 
220 	flags = le32_to_cpu(r->num_parent_flags);
221 	st->num_returned = NUM_PARENTS_RETURNED(flags);
222 	st->num_remaining = NUM_PARENTS_REMAINING(flags);
223 
224 	/*
225 	 * num parents is not declared previously anywhere so we
226 	 * assume it's returned+remaining on first call.
227 	 */
228 	if (!st->max_resources) {
229 		p->clk->num_parents = st->num_returned + st->num_remaining;
230 		p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
231 					       sizeof(*p->clk->parents),
232 					       GFP_KERNEL);
233 		if (!p->clk->parents) {
234 			p->clk->num_parents = 0;
235 			return -ENOMEM;
236 		}
237 		st->max_resources = st->num_returned + st->num_remaining;
238 	}
239 
240 	return 0;
241 }
242 
243 static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
244 						      const void *response,
245 						      struct scmi_iterator_state *st,
246 						      void *priv)
247 {
248 	const struct scmi_msg_resp_clock_possible_parents *r = response;
249 	struct scmi_clk_ipriv *p = priv;
250 
251 	u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];
252 
253 	*parent = le32_to_cpu(r->possible_parents[st->loop_idx]);
254 
255 	return 0;
256 }
257 
258 static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
259 				       struct scmi_clock_info *clk)
260 {
261 	struct scmi_iterator_ops ops = {
262 		.prepare_message = iter_clk_possible_parents_prepare_message,
263 		.update_state = iter_clk_possible_parents_update_state,
264 		.process_response = iter_clk_possible_parents_process_response,
265 	};
266 
267 	struct scmi_clk_ipriv ppriv = {
268 		.clk_id = clk_id,
269 		.clk = clk,
270 		.dev = ph->dev,
271 	};
272 	void *iter;
273 	int ret;
274 
275 	iter = ph->hops->iter_response_init(ph, &ops, 0,
276 					    CLOCK_POSSIBLE_PARENTS_GET,
277 					    sizeof(struct scmi_msg_clock_possible_parents),
278 					    &ppriv);
279 	if (IS_ERR(iter))
280 		return PTR_ERR(iter);
281 
282 	ret = ph->hops->iter_response_run(iter);
283 
284 	return ret;
285 }
286 
/*
 * Retrieve the attributes of one clock: name, enable latency and feature
 * flags, plus (when advertised) its extended name and possible parents.
 */
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk,
				     u32 version)
{
	int ret;
	u32 attributes;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 latency = 0;
		attributes = le32_to_cpu(attr->attributes);
		strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
		/* clock_enable_latency field is present only since SCMI v3.1 */
		if (PROTOCOL_REV_MAJOR(version) >= 0x2)
			latency = le32_to_cpu(attr->clock_enable_latency);
		/* A reported latency of 0 is remapped to U32_MAX (worst case). */
		clk->enable_latency = latency ? : U32_MAX;
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
						    NULL, clk->name,
						    SCMI_MAX_STR_SIZE);

		if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
		/* Parent discovery is best effort: its failure is not fatal. */
		if (SUPPORTS_PARENT_CLOCK(attributes))
			scmi_clock_possible_parents(ph, clk_id, clk);
	}

	return ret;
}
337 
/* Three-way u64 comparator used to sort discrete rate lists ascending. */
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *a = _r1, *b = _r2;

	if (*a < *b)
		return -1;

	return (*a > *b) ? 1 : 0;
}
349 
350 static void iter_clk_describe_prepare_message(void *message,
351 					      const unsigned int desc_index,
352 					      const void *priv)
353 {
354 	struct scmi_msg_clock_describe_rates *msg = message;
355 	const struct scmi_clk_ipriv *p = priv;
356 
357 	msg->id = cpu_to_le32(p->clk_id);
358 	/* Set the number of rates to be skipped/already read */
359 	msg->rate_index = cpu_to_le32(desc_index);
360 }
361 
/*
 * Parse a CLOCK_DESCRIBE_RATES reply header, fixing up a known firmware
 * quirk where a {min, max, step} triplet is returned with a bogus count.
 */
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
{
	u32 flags;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	/* Warn about out of spec replies ... */
	if (!p->clk->rate_discrete &&
	    (st->num_returned != 3 || st->num_remaining != 0)) {
		dev_warn(p->dev,
			 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
			 p->clk->name, st->num_returned, st->num_remaining,
			 st->rx_len);

		/*
		 * A known quirk: a triplet is returned but num_returned != 3
		 * Check for a safe payload size and fix.
		 */
		if (st->num_returned != 3 && st->num_remaining == 0 &&
		    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
			st->num_returned = 3;
			st->num_remaining = 0;
		} else {
			dev_err(p->dev,
				"Cannot fix out-of-spec reply !\n");
			return -EPROTO;
		}
	}

	return 0;
}
400 
/* Record one reply entry: a range-triplet element or one discrete rate. */
static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
{
	int ret = 0;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		/*
		 * NOTE(review): the fixed rate[0..2] indices assume the whole
		 * min/max/step triplet arrives in one reply, which
		 * iter_clk_describe_update_state appears to enforce — confirm
		 * before allowing split triplet replies.
		 */
		switch (st->desc_index + st->loop_idx) {
		case 0:
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			break;
		case 1:
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			break;
		case 2:
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		/* Discrete rates land at their absolute index in the list. */
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
	}

	return ret;
}
434 
/*
 * Populate @clk with either its discrete rate list (sorted ascending on
 * return) or its {min, max, step} range, via CLOCK_DESCRIBE_RATES.
 */
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_describe_prepare_message,
		.update_state = iter_clk_describe_update_state,
		.process_response = iter_clk_describe_process_response,
	};
	struct scmi_clk_ipriv cpriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};

	iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
					    CLOCK_DESCRIBE_RATES,
					    sizeof(struct scmi_msg_clock_describe_rates),
					    &cpriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (!clk->rate_discrete) {
		dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
			clk->range.min_rate, clk->range.max_rate,
			clk->range.step_size);
	} else if (clk->list.num_rates) {
		/* The platform need not report rates in order: sort them. */
		sort(clk->list.rates, clk->list.num_rates,
		     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
	}

	return ret;
}
474 
475 static int
476 scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
477 		    u32 clk_id, u64 *value)
478 {
479 	int ret;
480 	struct scmi_xfer *t;
481 
482 	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
483 				      sizeof(__le32), sizeof(u64), &t);
484 	if (ret)
485 		return ret;
486 
487 	put_unaligned_le32(clk_id, t->tx.buf);
488 
489 	ret = ph->xops->do_xfer(ph, t);
490 	if (!ret)
491 		*value = get_unaligned_le64(t->rx.buf);
492 
493 	ph->xops->xfer_put(ph, t);
494 	return ret;
495 }
496 
/*
 * Set the rate of @clk_id, going asynchronous when the platform still has
 * spare async-request slots; a synchronous command is used otherwise.
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Claim an async slot; fall back to synchronous when all are busy. */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		/* Wait for the delayed response carrying the final rate. */
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			resp = t->rx.buf;
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;	/* completion for another clock */
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	/* The counter was bumped above whether or not ASYNC was granted. */
	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}
543 
544 static int
545 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
546 		      enum clk_state state, u8 __unused0, u32 __unused1,
547 		      bool atomic)
548 {
549 	int ret;
550 	struct scmi_xfer *t;
551 	struct scmi_msg_clock_config_set *cfg;
552 
553 	if (state >= CLK_STATE_RESERVED)
554 		return -EINVAL;
555 
556 	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
557 				      sizeof(*cfg), 0, &t);
558 	if (ret)
559 		return ret;
560 
561 	t->hdr.poll_completion = atomic;
562 
563 	cfg = t->tx.buf;
564 	cfg->id = cpu_to_le32(clk_id);
565 	cfg->attributes = cpu_to_le32(state);
566 
567 	ret = ph->xops->do_xfer(ph, t);
568 
569 	ph->xops->xfer_put(ph, t);
570 	return ret;
571 }
572 
/*
 * Reparent @clk_id to the entry at index @parent_id of its discovered
 * possible-parents table.
 */
static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 parent_id)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_set_parent *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	if (clk_id >= ci->num_clocks)
		return -EINVAL;

	clk = ci->clk + clk_id;

	/* @parent_id indexes the clock's parents[] table, not a clock ID. */
	if (parent_id >= clk->num_parents)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = false;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	/* Translate the index into the platform's parent clock identifier. */
	cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);

	return ret;
}
608 
609 static int
610 scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
611 		      u32 *parent_id)
612 {
613 	int ret;
614 	struct scmi_xfer *t;
615 
616 	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
617 				      sizeof(__le32), sizeof(u32), &t);
618 	if (ret)
619 		return ret;
620 
621 	put_unaligned_le32(clk_id, t->tx.buf);
622 
623 	ret = ph->xops->do_xfer(ph, t);
624 	if (!ret)
625 		*parent_id = get_unaligned_le32(t->rx.buf);
626 
627 	ph->xops->xfer_put(ph, t);
628 	return ret;
629 }
630 
/* For SCMI clock v2.1 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum clk_state state, u8 oem_type, u32 oem_val,
			 bool atomic)
{
	int ret;
	u32 attrs;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set_v2 *cfg;

	/* Reject reserved states and pure no-ops (no OEM, no state change). */
	if (state == CLK_STATE_RESERVED ||
	    (!oem_type && state == CLK_STATE_UNCHANGED))
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Poll for completion when the caller cannot sleep. */
	t->hdr.poll_completion = atomic;

	attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
		 FIELD_PREP(REGMASK_CLK_STATE, state);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(attrs);
	/* Clear in any case */
	cfg->oem_config_val = cpu_to_le32(0);
	if (oem_type)
		cfg->oem_config_val = cpu_to_le32(oem_val);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
669 
670 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
671 			     bool atomic)
672 {
673 	struct clock_info *ci = ph->get_priv(ph);
674 
675 	return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
676 				    NULL_OEM_TYPE, 0, atomic);
677 }
678 
679 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
680 			      bool atomic)
681 {
682 	struct clock_info *ci = ph->get_priv(ph);
683 
684 	return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
685 				    NULL_OEM_TYPE, 0, atomic);
686 }
687 
/* For SCMI clock v2.1 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 u8 oem_type, u32 *attributes, bool *enabled,
			 u32 *oem_val, bool atomic)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_get *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Poll for completion when the caller cannot sleep. */
	t->hdr.poll_completion = atomic;

	flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->flags = cpu_to_le32(flags);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;

		/* All output pointers are optional: fill only what was asked. */
		if (attributes)
			*attributes = le32_to_cpu(resp->attributes);

		if (enabled)
			*enabled = IS_CLK_ENABLED(resp->config);

		/* OEM value is meaningful only when an OEM type was requested. */
		if (oem_val && oem_type)
			*oem_val = le32_to_cpu(resp->oem_config_val);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}
730 
/*
 * Pre-v2.1 backend for ci->clock_config_get: only the enabled state can
 * be retrieved, via bit[0] of the CLOCK_ATTRIBUTES response; @oem_type,
 * @attributes and @oem_val are not supported here and are ignored.
 */
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u8 oem_type, u32 *attributes, bool *enabled,
		      u32 *oem_val, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *resp;

	if (!enabled)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*resp), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;
	put_unaligned_le32(clk_id, t->tx.buf);
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*enabled = IS_CLK_ENABLED(resp->attributes);

	ph->xops->xfer_put(ph, t);

	return ret;
}
760 
761 static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
762 				u32 clk_id, bool *enabled, bool atomic)
763 {
764 	struct clock_info *ci = ph->get_priv(ph);
765 
766 	return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
767 				    enabled, NULL, atomic);
768 }
769 
770 static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
771 				     u32 clk_id, u8 oem_type, u32 oem_val,
772 				     bool atomic)
773 {
774 	struct clock_info *ci = ph->get_priv(ph);
775 
776 	return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
777 				    oem_type, oem_val, atomic);
778 }
779 
780 static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
781 				     u32 clk_id, u8 oem_type, u32 *oem_val,
782 				     u32 *attributes, bool atomic)
783 {
784 	struct clock_info *ci = ph->get_priv(ph);
785 
786 	return ci->clock_config_get(ph, clk_id, oem_type, attributes,
787 				    NULL, oem_val, atomic);
788 }
789 
790 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
791 {
792 	struct clock_info *ci = ph->get_priv(ph);
793 
794 	return ci->num_clocks;
795 }
796 
797 static const struct scmi_clock_info *
798 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
799 {
800 	struct scmi_clock_info *clk;
801 	struct clock_info *ci = ph->get_priv(ph);
802 
803 	if (clk_id >= ci->num_clocks)
804 		return NULL;
805 
806 	clk = ci->clk + clk_id;
807 	if (!clk->name[0])
808 		return NULL;
809 
810 	return clk;
811 }
812 
/* Clock protocol operations exposed to SCMI driver users. */
static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.state_get = scmi_clock_state_get,
	.config_oem_get = scmi_clock_config_oem_get,
	.config_oem_set = scmi_clock_config_oem_set,
	.parent_set = scmi_clock_set_parent,
	.parent_get = scmi_clock_get_parent,
};
826 
827 static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
828 				u32 clk_id, int message_id, bool enable)
829 {
830 	int ret;
831 	struct scmi_xfer *t;
832 	struct scmi_msg_clock_rate_notify *notify;
833 
834 	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
835 	if (ret)
836 		return ret;
837 
838 	notify = t->tx.buf;
839 	notify->clk_id = cpu_to_le32(clk_id);
840 	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
841 
842 	ret = ph->xops->do_xfer(ph, t);
843 
844 	ph->xops->xfer_put(ph, t);
845 	return ret;
846 }
847 
848 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
849 				       u8 evt_id, u32 src_id, bool enable)
850 {
851 	int ret, cmd_id;
852 
853 	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
854 		return -EINVAL;
855 
856 	cmd_id = evt_2_cmd[evt_id];
857 	ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
858 	if (ret)
859 		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
860 			 evt_id, src_id, ret);
861 
862 	return ret;
863 }
864 
/*
 * Convert a raw rate-notification payload into a scmi_clock_rate_notif_report.
 * Returns NULL on a malformed payload size or an unexpected event ID.
 */
static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
					 u8 evt_id, ktime_t timestamp,
					 const void *payld, size_t payld_sz,
					 void *report, u32 *src_id)
{
	const struct scmi_clock_rate_notify_payld *p = payld;
	struct scmi_clock_rate_notif_report *r = report;

	if (sizeof(*p) != payld_sz ||
	    (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
	     evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
		return NULL;

	r->timestamp = timestamp;
	r->agent_id = le32_to_cpu(p->agent_id);
	r->clock_id = le32_to_cpu(p->clock_id);
	r->rate = get_unaligned_le64(&p->rate_low);
	/* The notification source is the clock the event refers to. */
	*src_id = r->clock_id;

	return r;
}
886 
887 static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
888 {
889 	struct clock_info *ci = ph->get_priv(ph);
890 
891 	if (!ci)
892 		return -EINVAL;
893 
894 	return ci->num_clocks;
895 }
896 
/* Descriptors for the two clock rate notification events. */
static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};

static const struct scmi_event_ops clk_event_ops = {
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};

/* Event tables hooked into the SCMI notification core at registration. */
static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};
922 
923 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
924 {
925 	u32 version;
926 	int clkid, ret;
927 	struct clock_info *cinfo;
928 
929 	ret = ph->xops->version_get(ph, &version);
930 	if (ret)
931 		return ret;
932 
933 	dev_dbg(ph->dev, "Clock Version %d.%d\n",
934 		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
935 
936 	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
937 	if (!cinfo)
938 		return -ENOMEM;
939 
940 	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
941 	if (ret)
942 		return ret;
943 
944 	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
945 				  sizeof(*cinfo->clk), GFP_KERNEL);
946 	if (!cinfo->clk)
947 		return -ENOMEM;
948 
949 	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
950 		struct scmi_clock_info *clk = cinfo->clk + clkid;
951 
952 		ret = scmi_clock_attributes_get(ph, clkid, clk, version);
953 		if (!ret)
954 			scmi_clock_describe_rates_get(ph, clkid, clk);
955 	}
956 
957 	if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
958 	    PROTOCOL_REV_MINOR(version) >= 0x1) {
959 		cinfo->clock_config_set = scmi_clock_config_set_v2;
960 		cinfo->clock_config_get = scmi_clock_config_get_v2;
961 	} else {
962 		cinfo->clock_config_set = scmi_clock_config_set;
963 		cinfo->clock_config_get = scmi_clock_config_get;
964 	}
965 
966 	cinfo->version = version;
967 	return ph->set_priv(ph, cinfo, version);
968 }
969 
/* Protocol descriptor handed to the SCMI core at module (un)registration. */
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
980