xref: /linux/drivers/firmware/arm_scmi/clock.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Clock Protocol
4  *
5  * Copyright (C) 2018-2022 ARM Ltd.
6  */
7 
8 #include <linux/module.h>
9 #include <linux/limits.h>
10 #include <linux/sort.h>
11 
12 #include "protocols.h"
13 #include "notify.h"
14 #include "quirks.h"
15 
16 /* Updated only after ALL the mandatory features for that version are merged */
17 #define SCMI_PROTOCOL_SUPPORTED_VERSION		0x30000
18 
/* Clock protocol message IDs, per the Arm SCMI Clock Management Protocol */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	/* Commands below are optional or only present in later spec versions */
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
	CLOCK_CONFIG_GET = 0xB,
	CLOCK_POSSIBLE_PARENTS_GET = 0xC,
	CLOCK_PARENT_SET = 0xD,
	CLOCK_PARENT_GET = 0xE,
	CLOCK_GET_PERMISSIONS = 0xF,
};
34 
/* CLOCK_GET_PERMISSIONS response bits: operations this agent may perform */
#define CLOCK_STATE_CONTROL_ALLOWED	BIT(31)
#define CLOCK_PARENT_CONTROL_ALLOWED	BIT(30)
#define CLOCK_RATE_CONTROL_ALLOWED	BIT(29)

/*
 * Values for the state field of CLOCK_CONFIG_SET attributes.
 * CLK_STATE_UNCHANGED is used to touch only the OEM config (v2.1+ layout).
 */
enum clk_state {
	CLK_STATE_DISABLE,
	CLK_STATE_ENABLE,
	CLK_STATE_RESERVED,
	CLK_STATE_UNCHANGED,
};
45 
/* PROTOCOL_ATTRIBUTES response: clock count and async-command capacity */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

/* CLOCK_ATTRIBUTES response; per-clock feature flags live in @attributes */
struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)		((x) & BIT(28))
#define SUPPORTS_EXTENDED_CONFIG(x)		((x) & BIT(27))
#define SUPPORTS_GET_PERMISSIONS(x)		((x) & BIT(1))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	__le32 clock_enable_latency;
};
63 
/* CLOCK_POSSIBLE_PARENTS_GET request: @skip_parents resumes enumeration */
struct scmi_msg_clock_possible_parents {
	__le32 id;
	__le32 skip_parents;
};

/* CLOCK_POSSIBLE_PARENTS_GET response: counts packed into one word */
struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x)		((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)	((x) >> 24)
	__le32 possible_parents[];
};

/* CLOCK_PARENT_SET request */
struct scmi_msg_clock_set_parent {
	__le32 id;
	__le32 parent_id;
};

/* CLOCK_CONFIG_SET request, pre-v2.1 layout: attributes carry the state */
struct scmi_msg_clock_config_set {
	__le32 id;
	__le32 attributes;
};

/* Valid only from SCMI clock v2.1 */
struct scmi_msg_clock_config_set_v2 {
	__le32 id;
	__le32 attributes;
#define NULL_OEM_TYPE			0
#define REGMASK_OEM_TYPE_SET		GENMASK(23, 16)
#define REGMASK_CLK_STATE		GENMASK(1, 0)
	__le32 oem_config_val;
};

/* CLOCK_CONFIG_GET request: OEM type to query is encoded in @flags */
struct scmi_msg_clock_config_get {
	__le32 id;
	__le32 flags;
#define REGMASK_OEM_TYPE_GET		GENMASK(7, 0)
};

/* CLOCK_CONFIG_GET response */
struct scmi_msg_resp_clock_config_get {
	__le32 attributes;
	__le32 config;
#define IS_CLK_ENABLED(x)		le32_get_bits((x), BIT(0))
	__le32 oem_config_val;
};
108 
/* CLOCK_DESCRIBE_RATES request: @rate_index resumes a partial enumeration */
struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

/*
 * CLOCK_DESCRIBE_RATES response: either a discrete list of rates or, when
 * RATE_DISCRETE() is false, a (min, max, step) triplet of exactly 3 entries.
 */
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
/* Assemble a 64-bit rate from the low/high pair (single-eval of X) */
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

/* CLOCK_RATE_SET request */
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

/* Delayed response payload for an asynchronous CLOCK_RATE_SET */
struct scmi_msg_resp_set_rate_complete {
	__le32 id;
	__le32 rate_low;
	__le32 rate_high;
};

/* CLOCK_RATE_NOTIFY / CLOCK_RATE_CHANGE_REQUESTED_NOTIFY request */
struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;
};

/* Notification payload shared by both rate events */
struct scmi_clock_rate_notify_payld {
	__le32 agent_id;
	__le32 clock_id;
	__le32 rate_low;
	__le32 rate_high;
};
158 
/*
 * Protocol-instance private data, set via ph->set_priv() at init.
 * The clock_config_set/get hooks abstract the v2.1+ vs legacy message
 * layouts so callers need not check the protocol version themselves.
 */
struct clock_info {
	int num_clocks;
	int max_async_req;
	bool notify_rate_changed_cmd;
	bool notify_rate_change_requested_cmd;
	/* Number of CLOCK_RATE_SET requests currently in flight as async */
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
	int (*clock_config_set)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum clk_state state,
				enum scmi_clock_oem_config oem_type,
				u32 oem_val, bool atomic);
	int (*clock_config_get)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum scmi_clock_oem_config oem_type,
				u32 *attributes, bool *enabled, u32 *oem_val,
				bool atomic);
};

/* Map notification event IDs to the corresponding protocol commands */
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
180 
181 static inline struct scmi_clock_info *
182 scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
183 {
184 	if (clk_id >= ci->num_clocks)
185 		return ERR_PTR(-EINVAL);
186 
187 	return ci->clk + clk_id;
188 }
189 
/*
 * Query PROTOCOL_ATTRIBUTES: cache the number of clock domains and the
 * platform's async-request capacity, then probe whether the optional
 * rate-notification commands are actually implemented.
 *
 * Return: 0 on success, negative error code on transport failure.
 */
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);

	if (!ret) {
		/* Both commands are optional: zero from msg_check == supported */
		if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
			ci->notify_rate_changed_cmd = true;

		if (!ph->hops->protocol_msg_check(ph,
						  CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
						  NULL))
			ci->notify_rate_change_requested_cmd = true;
	}

	return ret;
}
225 
/* Private context threaded through the multi-part response iterators */
struct scmi_clk_ipriv {
	struct device *dev;
	u32 clk_id;
	struct scmi_clock_info *clk;
};
231 
/* Build one CLOCK_POSSIBLE_PARENTS_GET request for the iterator framework */
static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
						      const void *priv)
{
	struct scmi_msg_clock_possible_parents *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of parents to be skipped/already read */
	msg->skip_parents = cpu_to_le32(desc_index);
}
242 
243 static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
244 						  const void *response, void *priv)
245 {
246 	const struct scmi_msg_resp_clock_possible_parents *r = response;
247 	struct scmi_clk_ipriv *p = priv;
248 	struct device *dev = ((struct scmi_clk_ipriv *)p)->dev;
249 	u32 flags;
250 
251 	flags = le32_to_cpu(r->num_parent_flags);
252 	st->num_returned = NUM_PARENTS_RETURNED(flags);
253 	st->num_remaining = NUM_PARENTS_REMAINING(flags);
254 
255 	/*
256 	 * num parents is not declared previously anywhere so we
257 	 * assume it's returned+remaining on first call.
258 	 */
259 	if (!st->max_resources) {
260 		p->clk->num_parents = st->num_returned + st->num_remaining;
261 		p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
262 					       sizeof(*p->clk->parents),
263 					       GFP_KERNEL);
264 		if (!p->clk->parents) {
265 			p->clk->num_parents = 0;
266 			return -ENOMEM;
267 		}
268 		st->max_resources = st->num_returned + st->num_remaining;
269 	}
270 
271 	return 0;
272 }
273 
274 static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
275 						      const void *response,
276 						      struct scmi_iterator_state *st,
277 						      void *priv)
278 {
279 	const struct scmi_msg_resp_clock_possible_parents *r = response;
280 	struct scmi_clk_ipriv *p = priv;
281 
282 	u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];
283 
284 	*parent = le32_to_cpu(r->possible_parents[st->loop_idx]);
285 
286 	return 0;
287 }
288 
289 static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
290 				       struct scmi_clock_info *clk)
291 {
292 	struct scmi_iterator_ops ops = {
293 		.prepare_message = iter_clk_possible_parents_prepare_message,
294 		.update_state = iter_clk_possible_parents_update_state,
295 		.process_response = iter_clk_possible_parents_process_response,
296 	};
297 
298 	struct scmi_clk_ipriv ppriv = {
299 		.clk_id = clk_id,
300 		.clk = clk,
301 		.dev = ph->dev,
302 	};
303 	void *iter;
304 	int ret;
305 
306 	iter = ph->hops->iter_response_init(ph, &ops, 0,
307 					    CLOCK_POSSIBLE_PARENTS_GET,
308 					    sizeof(struct scmi_msg_clock_possible_parents),
309 					    &ppriv);
310 	if (IS_ERR(iter))
311 		return PTR_ERR(iter);
312 
313 	ret = ph->hops->iter_response_run(iter);
314 
315 	return ret;
316 }
317 
/*
 * Issue CLOCK_GET_PERMISSIONS and record, per clock, which operations the
 * platform forbids to this agent (state, rate and parent control).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
			   struct scmi_clock_info *clk)
{
	struct scmi_xfer *t;
	u32 perm;
	int ret;

	ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
				      sizeof(clk_id), sizeof(perm), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		perm = get_unaligned_le32(t->rx.buf);

		/* Flags are "allowed" in the reply; cache the negation */
		clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
		clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
		clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}
346 
/*
 * Fetch CLOCK_ATTRIBUTES for one clock and populate its scmi_clock_info:
 * name (possibly extended), enable latency, notification capabilities and,
 * on v3.0+, parents/permissions/extended-config support.
 *
 * Return: 0 on success, negative error code otherwise. Failures of the
 * optional follow-up queries (parents, permissions) are deliberately ignored.
 */
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct clock_info *cinfo)
{
	int ret;
	u32 attributes;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;
	struct scmi_clock_info *clk = cinfo->clk + clk_id;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 latency = 0;

		attributes = le32_to_cpu(attr->attributes);
		strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
		/*
		 * clock_enable_latency field is present only since SCMI v3.1
		 * (i.e. clock protocol major version >= 0x2).
		 */
		if (PROTOCOL_REV_MAJOR(ph->version) >= 0x2)
			latency = le32_to_cpu(attr->clock_enable_latency);
		/* Zero means "not provided": treat it as unbounded latency */
		clk->enable_latency = latency ? : U32_MAX;
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(ph->version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
						    NULL, clk->name,
						    SCMI_MAX_STR_SIZE);

		/* Notifications need both the command and per-clock support */
		if (cinfo->notify_rate_changed_cmd &&
		    SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (cinfo->notify_rate_change_requested_cmd &&
		    SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
		if (PROTOCOL_REV_MAJOR(ph->version) >= 0x3) {
			if (SUPPORTS_PARENT_CLOCK(attributes))
				scmi_clock_possible_parents(ph, clk_id, clk);
			if (SUPPORTS_GET_PERMISSIONS(attributes))
				scmi_clock_get_permissions(ph, clk_id, clk);
			if (SUPPORTS_EXTENDED_CONFIG(attributes))
				clk->extended_config = true;
		}
	}

	return ret;
}
406 
407 static int rate_cmp_func(const void *_r1, const void *_r2)
408 {
409 	const u64 *r1 = _r1, *r2 = _r2;
410 
411 	if (*r1 < *r2)
412 		return -1;
413 	else if (*r1 == *r2)
414 		return 0;
415 	else
416 		return 1;
417 }
418 
419 static void iter_clk_describe_prepare_message(void *message,
420 					      const unsigned int desc_index,
421 					      const void *priv)
422 {
423 	struct scmi_msg_clock_describe_rates *msg = message;
424 	const struct scmi_clk_ipriv *p = priv;
425 
426 	msg->id = cpu_to_le32(p->clk_id);
427 	/* Set the number of rates to be skipped/already read */
428 	msg->rate_index = cpu_to_le32(desc_index);
429 }
430 
/*
 * Quirk body for SCMI_QUIRK(clock_rates_triplet_out_of_spec): expands inside
 * iter_clk_describe_update_state() and relies on its locals (st, r, p).
 * NOTE: contains a bare `return` that exits the *enclosing function* when the
 * reply cannot be safely fixed up.
 */
#define QUIRK_OUT_OF_SPEC_TRIPLET					       \
	({								       \
		/*							       \
		 * A known quirk: a triplet is returned but num_returned != 3  \
		 * Check for a safe payload size and fix.		       \
		 */							       \
		if (st->num_returned != 3 && st->num_remaining == 0 &&	       \
		    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {       \
			st->num_returned = 3;				       \
			st->num_remaining = 0;				       \
		} else {						       \
			dev_err(p->dev,					       \
				"Cannot fix out-of-spec reply !\n");	       \
			return -EPROTO;					       \
		}							       \
	})
447 
/*
 * Parse the counts/flags of a CLOCK_DESCRIBE_RATES reply. For range-typed
 * clocks the spec mandates exactly one triplet (min, max, step); anything
 * else is out of spec and handled (or rejected) by the quirk below.
 *
 * Return: 0 on success, -EPROTO (via the quirk macro) on an unfixable reply.
 */
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
{
	u32 flags;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	/* Warn about out of spec replies ... */
	if (!p->clk->rate_discrete &&
	    (st->num_returned != 3 || st->num_remaining != 0)) {
		dev_warn(p->dev,
			 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
			 p->clk->name, st->num_returned, st->num_remaining,
			 st->rx_len);

		/* May `return -EPROTO` from this function: see macro above */
		SCMI_QUIRK(clock_rates_triplet_out_of_spec,
			   QUIRK_OUT_OF_SPEC_TRIPLET);
	}

	return 0;
}
475 
/*
 * Consume one entry of a CLOCK_DESCRIBE_RATES reply: for a range-typed clock
 * the three entries map (in order) to min/max/step; for a discrete clock each
 * entry is appended to the rates list.
 *
 * Return: 0 on success, -EINVAL if a range reply carries more than 3 entries.
 */
static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
{
	int ret = 0;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		/* Overall position across chunks selects the triplet member */
		switch (st->desc_index + st->loop_idx) {
		case 0:
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			break;
		case 1:
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			break;
		case 2:
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
	}

	return ret;
}
509 
/*
 * Enumerate the supported rates of @clk_id (discrete list or min/max/step
 * range) and, for discrete clocks, sort the list ascending since the
 * platform is not required to return rates in order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_describe_prepare_message,
		.update_state = iter_clk_describe_update_state,
		.process_response = iter_clk_describe_process_response,
	};
	struct scmi_clk_ipriv cpriv = {
		.clk_id = clk_id,
		.clk = clk,
		.dev = ph->dev,
	};

	iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
					    CLOCK_DESCRIBE_RATES,
					    sizeof(struct scmi_msg_clock_describe_rates),
					    &cpriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (!clk->rate_discrete) {
		dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
			clk->range.min_rate, clk->range.max_rate,
			clk->range.step_size);
	} else if (clk->list.num_rates) {
		sort(clk->list.rates, clk->list.num_rates,
		     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
	}

	return ret;
}
549 
/*
 * Read the current rate of @clk_id via CLOCK_RATE_GET.
 *
 * @value: filled with the rate in Hz on success.
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}
571 
/*
 * Request a new rate for @clk_id. The request goes out asynchronously
 * (CLOCK_SET_ASYNC) whenever the platform advertised async capacity and a
 * slot is free, otherwise it falls back to a plain synchronous transfer.
 *
 * Return: 0 on success, -EACCES if rate control is forbidden for this agent,
 * -EPROTO if the async completion names the wrong clock, or another negative
 * error code on transport failure.
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk->rate_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Go async only while below the platform's advertised capacity */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			/* Delayed response must echo the clock it acted on */
			resp = t->rx.buf;
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	/* Balance the atomic_inc_return above (even if async was not used) */
	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}
626 
/*
 * Legacy (pre-v3.0) CLOCK_CONFIG_SET: only enable/disable is expressible,
 * so the OEM parameters are accepted-but-ignored to match the common
 * clock_config_set hook signature in struct clock_info.
 *
 * Return: 0 on success, -EINVAL for states the legacy layout cannot carry,
 * negative error code on transport failure.
 */
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum clk_state state,
		      enum scmi_clock_oem_config __unused0, u32 __unused1,
		      bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set *cfg;

	if (state >= CLK_STATE_RESERVED)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Poll instead of sleeping when called from atomic context */
	t->hdr.poll_completion = atomic;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(state);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
656 
/*
 * Reparent @clk_id onto its @parent_id-th possible parent (an index into the
 * parents array discovered at init, not a raw domain ID).
 *
 * Return: 0 on success, -EINVAL for an out-of-range index, -EACCES when
 * parent control is forbidden, negative error code on transport failure.
 */
static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 parent_id)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_set_parent *cfg;
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk;

	clk = scmi_clock_domain_lookup(ci, clk_id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (parent_id >= clk->num_parents)
		return -EINVAL;

	if (clk->parent_ctrl_forbidden)
		return -EACCES;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = false;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	/* Translate the local index into the platform's parent domain ID */
	cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);

	return ret;
}
694 
/*
 * Read the current parent domain ID of @clk_id via CLOCK_PARENT_GET.
 *
 * @parent_id: filled with the raw parent clock ID on success.
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 *parent_id)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
				      sizeof(__le32), sizeof(u32), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*parent_id = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}
716 
/* For SCMI clock v3.0 and onwards */
/*
 * Extended CLOCK_CONFIG_SET: can change the state, an OEM-specific config
 * value, or both (state == CLK_STATE_UNCHANGED touches only the OEM config).
 *
 * Return: 0 on success, -EINVAL for an inexpressible combination, negative
 * error code on transport failure.
 */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum clk_state state,
			 enum scmi_clock_oem_config oem_type, u32 oem_val,
			 bool atomic)
{
	int ret;
	u32 attrs;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_set_v2 *cfg;

	/* UNCHANGED without an OEM type would be a no-op request */
	if (state == CLK_STATE_RESERVED ||
	    (!oem_type && state == CLK_STATE_UNCHANGED))
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Poll instead of sleeping when called from atomic context */
	t->hdr.poll_completion = atomic;

	attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
		 FIELD_PREP(REGMASK_CLK_STATE, state);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(attrs);
	/* Clear in any case */
	cfg->oem_config_val = cpu_to_le32(0);
	if (oem_type)
		cfg->oem_config_val = cpu_to_le32(oem_val);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
756 
757 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
758 			     bool atomic)
759 {
760 	struct clock_info *ci = ph->get_priv(ph);
761 	struct scmi_clock_info *clk;
762 
763 	clk = scmi_clock_domain_lookup(ci, clk_id);
764 	if (IS_ERR(clk))
765 		return PTR_ERR(clk);
766 
767 	if (clk->state_ctrl_forbidden)
768 		return -EACCES;
769 
770 	return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
771 				    NULL_OEM_TYPE, 0, atomic);
772 }
773 
774 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
775 			      bool atomic)
776 {
777 	struct clock_info *ci = ph->get_priv(ph);
778 	struct scmi_clock_info *clk;
779 
780 	clk = scmi_clock_domain_lookup(ci, clk_id);
781 	if (IS_ERR(clk))
782 		return PTR_ERR(clk);
783 
784 	if (clk->state_ctrl_forbidden)
785 		return -EACCES;
786 
787 	return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
788 				    NULL_OEM_TYPE, 0, atomic);
789 }
790 
/* For SCMI clock v3.0 and onwards */
/*
 * Extended CLOCK_CONFIG_GET: reads attributes, enabled state and,
 * optionally, an OEM-specific config value. Any of the output pointers may
 * be NULL if the caller is not interested in that datum.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
			 enum scmi_clock_oem_config oem_type, u32 *attributes,
			 bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_clock_config_get *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Poll instead of sleeping when called from atomic context */
	t->hdr.poll_completion = atomic;

	flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->flags = cpu_to_le32(flags);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;

		if (attributes)
			*attributes = le32_to_cpu(resp->attributes);

		if (enabled)
			*enabled = IS_CLK_ENABLED(resp->config);

		/* oem_config_val is only meaningful when a type was queried */
		if (oem_val && oem_type)
			*oem_val = le32_to_cpu(resp->oem_config_val);
	}

	ph->xops->xfer_put(ph, t);

	return ret;
}
833 
/*
 * Legacy (pre-v3.0) config read: there is no CLOCK_CONFIG_GET command, so
 * the enabled state is derived from bit 0 of the CLOCK_ATTRIBUTES reply.
 * Only @enabled can be reported; the other outputs of the common hook
 * signature are unavailable in this version.
 *
 * Return: 0 on success, -EINVAL if @enabled is NULL, negative error code
 * on transport failure.
 */
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
		      enum scmi_clock_oem_config oem_type, u32 *attributes,
		      bool *enabled, u32 *oem_val, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *resp;

	if (!enabled)
		return -EINVAL;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*resp), &t);
	if (ret)
		return ret;

	/* Poll instead of sleeping when called from atomic context */
	t->hdr.poll_completion = atomic;
	put_unaligned_le32(clk_id, t->tx.buf);
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*enabled = IS_CLK_ENABLED(resp->attributes);

	ph->xops->xfer_put(ph, t);

	return ret;
}
863 
864 static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
865 				u32 clk_id, bool *enabled, bool atomic)
866 {
867 	struct clock_info *ci = ph->get_priv(ph);
868 
869 	return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
870 				    enabled, NULL, atomic);
871 }
872 
873 static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
874 				     u32 clk_id,
875 				     enum scmi_clock_oem_config oem_type,
876 				     u32 oem_val, bool atomic)
877 {
878 	struct clock_info *ci = ph->get_priv(ph);
879 	struct scmi_clock_info *clk;
880 
881 	clk = scmi_clock_domain_lookup(ci, clk_id);
882 	if (IS_ERR(clk))
883 		return PTR_ERR(clk);
884 
885 	if (!clk->extended_config)
886 		return -EOPNOTSUPP;
887 
888 	return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
889 				    oem_type, oem_val, atomic);
890 }
891 
892 static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
893 				     u32 clk_id,
894 				     enum scmi_clock_oem_config oem_type,
895 				     u32 *oem_val, u32 *attributes, bool atomic)
896 {
897 	struct clock_info *ci = ph->get_priv(ph);
898 	struct scmi_clock_info *clk;
899 
900 	clk = scmi_clock_domain_lookup(ci, clk_id);
901 	if (IS_ERR(clk))
902 		return PTR_ERR(clk);
903 
904 	if (!clk->extended_config)
905 		return -EOPNOTSUPP;
906 
907 	return ci->clock_config_get(ph, clk_id, oem_type, attributes,
908 				    NULL, oem_val, atomic);
909 }
910 
911 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
912 {
913 	struct clock_info *ci = ph->get_priv(ph);
914 
915 	return ci->num_clocks;
916 }
917 
918 static const struct scmi_clock_info *
919 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
920 {
921 	struct scmi_clock_info *clk;
922 	struct clock_info *ci = ph->get_priv(ph);
923 
924 	clk = scmi_clock_domain_lookup(ci, clk_id);
925 	if (IS_ERR(clk))
926 		return NULL;
927 
928 	if (!clk->name[0])
929 		return NULL;
930 
931 	return clk;
932 }
933 
/* Operations exposed to SCMI clock users (e.g. the clk framework glue) */
static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.state_get = scmi_clock_state_get,
	.config_oem_get = scmi_clock_config_oem_get,
	.config_oem_set = scmi_clock_config_oem_set,
	.parent_set = scmi_clock_set_parent,
	.parent_get = scmi_clock_get_parent,
};
947 
948 static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
949 				      u8 evt_id, u32 src_id)
950 {
951 	bool supported;
952 	struct scmi_clock_info *clk;
953 	struct clock_info *ci = ph->get_priv(ph);
954 
955 	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
956 		return false;
957 
958 	clk = scmi_clock_domain_lookup(ci, src_id);
959 	if (IS_ERR(clk))
960 		return false;
961 
962 	if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
963 		supported = clk->rate_changed_notifications;
964 	else
965 		supported = clk->rate_change_requested_notifications;
966 
967 	return supported;
968 }
969 
/*
 * Enable or disable one of the rate notification commands (@message_id is
 * CLOCK_RATE_NOTIFY or CLOCK_RATE_CHANGE_REQUESTED_NOTIFY) for @clk_id.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
				u32 clk_id, int message_id, bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_rate_notify *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->clk_id = cpu_to_le32(clk_id);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
990 
991 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
992 				       u8 evt_id, u32 src_id, bool enable)
993 {
994 	int ret, cmd_id;
995 
996 	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
997 		return -EINVAL;
998 
999 	cmd_id = evt_2_cmd[evt_id];
1000 	ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
1001 	if (ret)
1002 		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
1003 			 evt_id, src_id, ret);
1004 
1005 	return ret;
1006 }
1007 
1008 static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
1009 					 u8 evt_id, ktime_t timestamp,
1010 					 const void *payld, size_t payld_sz,
1011 					 void *report, u32 *src_id)
1012 {
1013 	const struct scmi_clock_rate_notify_payld *p = payld;
1014 	struct scmi_clock_rate_notif_report *r = report;
1015 
1016 	if (sizeof(*p) != payld_sz ||
1017 	    (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
1018 	     evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
1019 		return NULL;
1020 
1021 	r->timestamp = timestamp;
1022 	r->agent_id = le32_to_cpu(p->agent_id);
1023 	r->clock_id = le32_to_cpu(p->clock_id);
1024 	r->rate = get_unaligned_le64(&p->rate_low);
1025 	*src_id = r->clock_id;
1026 
1027 	return r;
1028 }
1029 
1030 static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
1031 {
1032 	struct clock_info *ci = ph->get_priv(ph);
1033 
1034 	if (!ci)
1035 		return -EINVAL;
1036 
1037 	return ci->num_clocks;
1038 }
1039 
/* Events this protocol can deliver, with their payload/report sizes */
static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};

/* Hooks the SCMI notification core uses to drive this protocol */
static const struct scmi_event_ops clk_event_ops = {
	.is_notify_supported = scmi_clk_notify_supported,
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};

static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};
1066 
/*
 * Protocol instance init: discover the clock domains, populate each clock's
 * attributes and rates, and select the version-appropriate config hooks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	int clkid, ret;
	struct clock_info *cinfo;

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(ph->version), PROTOCOL_REV_MINOR(ph->version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	/* Per-clock failures leave that clock nameless; info_get hides it */
	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, cinfo);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	/* Clock protocol v3.0+ carries the extended config message layouts */
	if (PROTOCOL_REV_MAJOR(ph->version) >= 0x3) {
		cinfo->clock_config_set = scmi_clock_config_set_v2;
		cinfo->clock_config_get = scmi_clock_config_get_v2;
	} else {
		cinfo->clock_config_set = scmi_clock_config_set;
		cinfo->clock_config_get = scmi_clock_config_get;
	}

	return ph->set_priv(ph, cinfo);
}
1106 
/* Protocol descriptor registered with the SCMI core */
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
1117