// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2023 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/hashtable.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include <linux/xarray.h>

#include <trace/events/scmi.h>

#include "protocols.h"
#include "notify.h"

/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x40000

#define MAX_OPPS		64

enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};

enum {
	PERF_FC_LEVEL,
	PERF_FC_LIMIT,
	PERF_FC_MAX,
};

struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
	u32 indicative_freq;
	u32 level_index;
	struct hlist_node hash;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x)	((x) & BIT(1))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x)	((x) & BIT(26))
#define SUPPORTS_LEVEL_INDEXING(x)	((x) & BIT(25))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};

struct scmi_msg_resp_perf_describe_levels_v4 {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
		__le32 indicative_freq;
		__le32 level_index;
	} opp[];
};

struct perf_dom_info {
	u32 id;
	bool set_limits;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	bool level_indexing_mode;
	u32 opp_count;
	u32 rate_limit_us;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	unsigned long mult_factor;
	struct scmi_perf_domain_info info;
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;
	struct xarray opps_by_idx;
	struct xarray opps_by_lvl;
	DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
};

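/*
 * Look up an OPP by its indicative frequency in the per-domain hashtable.
 * Evaluates to the matching struct scmi_opp pointer, or NULL when no entry
 * with that frequency exists (the loop cursor is NULL after a full scan).
 */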
#define LOOKUP_BY_FREQ(__htp, __freq)					\
({									\
		/* u32 cast is needed to pick the right hash func */	\
		u32 f_ = (u32)(__freq);					\
		struct scmi_opp *_opp;					\
									\
		hash_for_each_possible((__htp), _opp, hash, f_)		\
			if (_opp->indicative_freq == f_)		\
				break;					\
		_opp;							\
})

struct scmi_perf_info {
	u16 num_domains;
	enum scmi_power_scale power_scale;
	u64 stats_addr;
	u32 stats_size;
	bool notify_lvl_cmd;
	bool notify_lim_cmd;
	struct perf_dom_info *dom_info;
};

static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

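/*
 * Query PROTOCOL_ATTRIBUTES to discover the number of performance domains,
 * the power scale in use and the optional statistics shared-memory region,
 * then probe whether the optional NOTIFY_LEVEL/NOTIFY_LIMITS commands are
 * implemented by the platform firmware.
 */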
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);

		if (POWER_SCALE_IN_MILLIWATT(flags))
			pi->power_scale = SCMI_POWER_MILLIWATTS;
		if (PROTOCOL_REV_MAJOR(ph->version) >= 0x3)
			if (POWER_SCALE_IN_MICROWATT(flags))
				pi->power_scale = SCMI_POWER_MICROWATTS;

		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	ph->xops->xfer_put(ph, t);

	if (!ret) {
		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
			pi->notify_lvl_cmd = true;

		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
			pi->notify_lim_cmd = true;
	}

	return ret;
}

static void scmi_perf_xa_destroy(void *data)
{
	int domain;
	struct scmi_perf_info *pinfo = data;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
		xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
	}
}

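/*
 * Retrieve the attributes of a single performance domain: supported
 * operations, notification capabilities, sustained frequency/level and,
 * for protocol versions >= 4.0, whether level-indexing mode is in use.
 * Also derives mult_factor, the Hz-per-performance-level multiplier used
 * to translate between levels and frequencies.
 */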
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				struct perf_dom_info *dom_info,
				bool notify_lim_cmd, bool notify_lvl_cmd)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				      sizeof(dom_info->id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(dom_info->id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
		if (notify_lim_cmd)
			dom_info->perf_limit_notify =
				SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		if (notify_lvl_cmd)
			dom_info->perf_level_notify =
				SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		if (PROTOCOL_REV_MAJOR(ph->version) >= 0x4)
			dom_info->level_indexing_mode =
				SUPPORTS_LEVEL_INDEXING(flags);
		dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) &
						GENMASK(19, 0);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		/*
		 * sustained_freq_khz = mult_factor * sustained_perf_level
		 * mult_factor must be a non-zero positive integer (not a fraction)
		 */
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level ||
		    dom_info->level_indexing_mode) {
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		} else {
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000UL)
					/ dom_info->sustained_perf_level;
			if ((dom_info->sustained_freq_khz * 1000UL) %
			    dom_info->sustained_perf_level)
				dev_warn(ph->dev,
					 "multiplier for domain %d rounded\n",
					 dom_info->id);
		}
		if (!dom_info->mult_factor)
			dev_warn(ph->dev,
				 "Wrong sustained perf/frequency (domain %d)\n",
				 dom_info->id);

		strscpy(dom_info->info.name, attr->name,
			SCMI_SHORT_NAME_MAX_SIZE);
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported, overwrite the short name with the extended one;
	 * on error just carry on and use the already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(ph->version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
					    dom_info->id, NULL, dom_info->info.name,
					    SCMI_MAX_STR_SIZE);

	xa_init(&dom_info->opps_by_lvl);
	if (dom_info->level_indexing_mode) {
		xa_init(&dom_info->opps_by_idx);
		hash_init(dom_info->opps_by_freq);
	}

	return ret;
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}

static void iter_perf_levels_prepare_message(void *message,
					     unsigned int desc_index,
					     const void *priv)
{
	struct scmi_msg_perf_describe_levels *msg = message;
	const struct perf_dom_info *perf_dom = priv;

	msg->domain = cpu_to_le32(perf_dom->id);
	/* Set the number of OPPs to be skipped/already read */
	msg->level_index = cpu_to_le32(desc_index);
}

static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
					 const void *response, void *priv)
{
	const struct scmi_msg_resp_perf_describe_levels *r = response;

	st->num_returned = le16_to_cpu(r->num_returned);
	st->num_remaining = le16_to_cpu(r->num_remaining);

	return 0;
}

static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
		     struct scmi_opp *opp, unsigned int loop_idx,
		     const struct scmi_msg_resp_perf_describe_levels *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	return 0;
}

static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
			struct scmi_opp *opp, unsigned int loop_idx,
			const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	/* Note that PERF v4 always reports five 32-bit words per OPP */
	opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
	if (dom->level_indexing_mode) {
		opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);

		ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
				GFP_KERNEL);
		if (ret) {
			dev_warn(dev,
				 "Failed to add opps_by_idx at %d for %s - ret:%d\n",
				 opp->level_index, dom->info.name, ret);

			/* Cleanup by_lvl too */
			xa_erase(&dom->opps_by_lvl, opp->perf);

			return ret;
		}

		hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
	}

	return 0;
}

static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
				  const void *response,
				  struct scmi_iterator_state *st, void *priv)
{
	int ret;
	struct scmi_opp *opp;
	struct perf_dom_info *perf_dom = priv;

	opp = &perf_dom->opp[perf_dom->opp_count];
	if (PROTOCOL_REV_MAJOR(ph->version) <= 0x3)
		ret = process_response_opp(ph->dev, perf_dom, opp,
					   st->loop_idx, response);
	else
		ret = process_response_opp_v4(ph->dev, perf_dom, opp,
					      st->loop_idx, response);

	/* Skip BAD duplicates received from firmware */
	if (ret)
		return ret == -EBUSY ? 0 : ret;

	perf_dom->opp_count++;

	dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
		opp->perf, opp->power, opp->trans_latency_us,
		opp->indicative_freq, opp->level_index);

	return 0;
}

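/*
 * Enumerate all the OPPs of a domain via PERF_DESCRIBE_LEVELS using the
 * common iterator helpers (the list may span multiple messages), then sort
 * them by ascending performance level. In level-indexing mode each OPP is
 * additionally indexed by level index and hashed by indicative frequency.
 */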
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
			      struct perf_dom_info *perf_dom)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    perf_dom);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (perf_dom->opp_count)
		sort(perf_dom->opp, perf_dom->opp_count,
		     sizeof(struct scmi_opp), opp_cmp_func, NULL);

	return ret;
}

static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->num_domains;
}

static inline struct perf_dom_info *
scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	if (domain >= pi->num_domains)
		return ERR_PTR(-EINVAL);

	return pi->dom_info + domain;
}

static const struct scmi_perf_domain_info *
scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return ERR_PTR(-EINVAL);

	return &dom->info;
}

static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

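/*
 * Set the limits through the fast channel when one was advertised for
 * LIMITS_SET (max and min are two consecutive 32-bit words, followed by a
 * doorbell ring); otherwise fall back to the regular message interface.
 */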
static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 max_perf,
				  u32 min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
				   dom->id, min_perf, max_perf);
		iowrite32(max_perf, fci->set_addr);
		iowrite32(min_perf, fci->set_addr + 4);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
}

static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->set_limits)
		return -EOPNOTSUPP;

	if (PROTOCOL_REV_MAJOR(ph->version) >= 0x3 && !max_perf && !min_perf)
		return -EINVAL;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		if (min_perf) {
			opp = xa_load(&dom->opps_by_lvl, min_perf);
			if (!opp)
				return -EIO;

			min_perf = opp->level_index;
		}

		if (max_perf) {
			opp = xa_load(&dom->opps_by_lvl, max_perf);
			if (!opp)
				return -EIO;

			max_perf = opp->level_index;
		}
	}

	return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
}

static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 *max_perf,
				  u32 *min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		*max_perf = ioread32(fci->get_addr);
		*min_perf = ioread32(fci->get_addr + 4);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
				   dom->id, *min_perf, *max_perf);
		return 0;
	}

	return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
}

static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *min_perf);
		if (!opp)
			return -EIO;

		*min_perf = opp->perf;

		opp = xa_load(&dom->opps_by_idx, *max_perf);
		if (!opp)
			return -EIO;

		*max_perf = opp->perf;
	}

	return 0;
}

static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
				   dom->id, level, 0);
		iowrite32(level, fci->set_addr);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_level_set(ph, dom->id, level, poll);
}

static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->info.set_perf)
		return -EOPNOTSUPP;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_lvl, level);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}

static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				     sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 *level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
				   dom->id, *level, 0);
		return 0;
	}

	return scmi_perf_msg_level_get(ph, dom->id, level, poll);
}

static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, level, poll);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *level);
		if (!opp)
			return -EIO;

		*level = opp->perf;
	}

	return 0;
}

static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

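/*
 * Discover and map the per-domain fast channels: LEVEL_GET/SET entries are
 * one 32-bit word (size 4), LIMITS_GET/SET entries are two (size 8, max
 * then min). The SET channels are initialized only when the corresponding
 * operation is supported by the domain.
 */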
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
				     struct perf_dom_info *dom)
{
	struct scmi_fc_info *fc;

	fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LEVEL_GET, 4, dom->id,
				   &fc[PERF_FC_LEVEL].get_addr, NULL,
				   &fc[PERF_FC_LEVEL].rate_limit);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LIMITS_GET, 8, dom->id,
				   &fc[PERF_FC_LIMIT].get_addr, NULL,
				   &fc[PERF_FC_LIMIT].rate_limit);

	if (dom->info.set_perf)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LEVEL_SET, 4, dom->id,
					   &fc[PERF_FC_LEVEL].set_addr,
					   &fc[PERF_FC_LEVEL].set_db,
					   &fc[PERF_FC_LEVEL].rate_limit);

	if (dom->set_limits)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LIMITS_SET, 8, dom->id,
					   &fc[PERF_FC_LIMIT].set_addr,
					   &fc[PERF_FC_LIMIT].set_db,
					   &fc[PERF_FC_LIMIT].rate_limit);

	dom->fc_info = fc;
}

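/*
 * Register all the OPPs of a domain with the OPP core for the given device.
 * Frequencies are derived from the perf level (or from the indicative
 * frequency when level indexing is used) scaled by mult_factor; anything
 * above the sustained frequency is flagged as a turbo OPP.
 */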
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev, u32 domain)
{
	int idx, ret;
	unsigned long freq;
	struct dev_pm_opp_data data = {};
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	for (idx = 0; idx < dom->opp_count; idx++) {
		if (!dom->level_indexing_mode)
			freq = dom->opp[idx].perf * dom->mult_factor;
		else
			freq = dom->opp[idx].indicative_freq * dom->mult_factor;

		/* All OPPs above the sustained frequency are treated as turbo */
		data.turbo = freq > dom->sustained_freq_khz * 1000UL;

		data.level = dom->opp[idx].perf;
		data.freq = freq;

		ret = dev_pm_opp_add_dynamic(dev, &data);
		if (ret) {
			dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
				 domain, dom->info.name, idx, freq);
			dev_pm_opp_remove_all_dynamic(dev);
			return ret;
		}

		dev_dbg(dev, "[%d][%s]: Registered OPP[%d] %lu\n",
			domain, dom->info.name, idx, freq);
	}
	return 0;
}

static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
				 u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	/* us to ns */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int
scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle *ph,
			 u32 domain, u32 *rate_limit)
{
	struct perf_dom_info *dom;

	if (!rate_limit)
		return -EINVAL;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	*rate_limit = dom->rate_limit_us;
	return 0;
}

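/*
 * Translate a frequency request into a performance level: without level
 * indexing this is simply freq / mult_factor, otherwise the OPP is looked
 * up by its indicative frequency and its level index is used instead.
 */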
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	unsigned int level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->level_indexing_mode) {
		level = freq / dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
				     freq / dom->mult_factor);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}

static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, &level, poll);
	if (ret)
		return ret;

	if (!dom->level_indexing_mode) {
		*freq = level * dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, level);
		if (!opp)
			return -EIO;

		*freq = opp->indicative_freq * dom->mult_factor;
	}

	return ret;
}

static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
				   u32 domain, unsigned long *freq,
				   unsigned long *power)
{
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		if (!dom->level_indexing_mode)
			opp_freq = opp->perf * dom->mult_factor;
		else
			opp_freq = opp->indicative_freq * dom->mult_factor;

		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
				      u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return false;

	return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}

static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle *ph,
				       u32 domain, u32 *rate_limit)
{
	struct perf_dom_info *dom;

	if (!rate_limit)
		return -EINVAL;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->fc_info)
		return -EINVAL;

	*rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit;
	return 0;
}

static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->power_scale;
}

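/*
 * Operations exposed to SCMI driver users, e.g. the cpufreq and devfreq
 * glue drivers. A minimal sketch of typical consumer usage, modelled on
 * the SCMI cpufreq driver (illustrative only, not part of this file;
 * sdev, domain and target_freq_hz are assumed to come from the caller):
 *
 *	const struct scmi_perf_proto_ops *perf_ops;
 *	struct scmi_protocol_handle *ph;
 *
 *	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
 *	if (IS_ERR(perf_ops))
 *		return PTR_ERR(perf_ops);
 *
 *	ret = perf_ops->freq_set(ph, domain, target_freq_hz, false);
 */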
static const struct scmi_perf_proto_ops perf_proto_ops = {
	.num_domains_get = scmi_perf_num_domains_get,
	.info_get = scmi_perf_info_get,
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.rate_limit_get = scmi_dvfs_rate_limit_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.fast_switch_rate_limit = scmi_fast_switch_rate_limit,
	.power_scale_get = scmi_power_scale_get,
};

static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
				       u8 evt_id, u32 src_id)
{
	bool supported;
	struct perf_dom_info *dom;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return false;

	dom = scmi_perf_domain_lookup(ph, src_id);
	if (IS_ERR(dom))
		return false;

	if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
		supported = dom->perf_limit_notify;
	else
		supported = dom->perf_level_notify;

	return supported;
}

static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

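/*
 * Translate a notified performance level (or level index, depending on the
 * domain mode) back into a frequency in Hz, for inclusion in the reports
 * delivered to notification listeners.
 */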
static int
scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
			    unsigned int index, unsigned long *freq)
{
	struct scmi_opp *opp;

	if (!dom || !freq)
		return -EINVAL;

	if (!dom->level_indexing_mode) {
		opp = xa_load(&dom->opps_by_lvl, index);
		if (!opp)
			return -ENODEV;

		*freq = opp->perf * dom->mult_factor;
	} else {
		opp = xa_load(&dom->opps_by_idx, index);
		if (!opp)
			return -ENODEV;

		*freq = opp->indicative_freq * dom->mult_factor;
	}

	return 0;
}

static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	int ret;
	void *rep = NULL;
	struct perf_dom_info *dom;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;
		unsigned long freq_min, freq_max;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		/* Check if the reported domain exists at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * The event will be reported from this point on, even if
		 * the translated frequencies cannot be retrieved later.
		 */
		*src_id = r->domain_id;
		rep = r;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
		if (ret)
			break;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
		if (ret)
			break;

		/* Report translated freqs ONLY if both available */
		r->range_max_freq = freq_max;
		r->range_min_freq = freq_min;

		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;
		unsigned long freq;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		/* Check if the reported domain exists at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * The event will be reported from this point on, even if
		 * the translated frequency cannot be retrieved later.
		 */
		*src_id = r->domain_id;
		rep = r;

		/* Report translated freqs ONLY if available */
		ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
						  &freq);
		if (ret)
			break;

		r->performance_level_freq = freq;

		break;
	}
	default:
		break;
	}

	return rep;
}

static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	if (!pi)
		return -EINVAL;

	return pi->num_domains;
}

static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

static const struct scmi_event_ops perf_event_ops = {
	.is_notify_supported = scmi_perf_notify_supported,
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};

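/*
 * Protocol instance initialization: query the protocol attributes, then,
 * for each advertised domain, retrieve its attributes, enumerate its OPPs
 * and set up fast channels when supported. The per-domain xarrays are torn
 * down automatically via a devm action on protocol removal.
 */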
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
	int domain, ret;
	struct scmi_perf_info *pinfo;

	dev_dbg(ph->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(ph->version), PROTOCOL_REV_MINOR(ph->version));

	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	ret = scmi_perf_attributes_get(ph, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		dom->id = domain;
		scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
						pinfo->notify_lvl_cmd);
		scmi_perf_describe_levels_get(ph, dom);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(ph, dom);
	}

	ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
	if (ret)
		return ret;

	return ph->set_priv(ph, pinfo);
}

static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
1319