Lines Matching +full:tx +full:- +full:freq
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2018-2023 ARM Ltd.
8 #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
175 if (_opp->indicative_freq == f_) \
203 ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, in scmi_perf_attributes_get()
208 attr = t->rx.buf; in scmi_perf_attributes_get()
210 ret = ph->xops->do_xfer(ph, t); in scmi_perf_attributes_get()
212 u16 flags = le16_to_cpu(attr->flags); in scmi_perf_attributes_get()
214 pi->num_domains = le16_to_cpu(attr->num_domains); in scmi_perf_attributes_get()
217 pi->power_scale = SCMI_POWER_MILLIWATTS; in scmi_perf_attributes_get()
218 if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3) in scmi_perf_attributes_get()
220 pi->power_scale = SCMI_POWER_MICROWATTS; in scmi_perf_attributes_get()
222 pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | in scmi_perf_attributes_get()
223 (u64)le32_to_cpu(attr->stats_addr_high) << 32; in scmi_perf_attributes_get()
224 pi->stats_size = le32_to_cpu(attr->stats_size); in scmi_perf_attributes_get()
227 ph->xops->xfer_put(ph, t); in scmi_perf_attributes_get()
230 if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL)) in scmi_perf_attributes_get()
231 pi->notify_lvl_cmd = true; in scmi_perf_attributes_get()
233 if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL)) in scmi_perf_attributes_get()
234 pi->notify_lim_cmd = true; in scmi_perf_attributes_get()
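
Every message-based call in this file follows the same transport shape that the matches above trace through scmi_perf_attributes_get(): allocate a transfer, point at its rx/tx buffers, run the exchange, then release it. A minimal kernel-context sketch of that shape (not a standalone program; it reuses the struct scmi_protocol_handle ops and the response layout implied by the matches, with error handling trimmed):

    static int perf_attrs_fetch_sketch(const struct scmi_protocol_handle *ph,
                                       struct scmi_perf_info *pi)
    {
            int ret;
            struct scmi_xfer *t;
            struct scmi_msg_resp_perf_attributes *attr;

            /* No tx payload for PROTOCOL_ATTRIBUTES; expect sizeof(*attr) back. */
            ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
                                          sizeof(*attr), &t);
            if (ret)
                    return ret;

            attr = t->rx.buf;                       /* response payload */

            ret = ph->xops->do_xfer(ph, t);         /* synchronous exchange */
            if (!ret) {
                    pi->num_domains = le16_to_cpu(attr->num_domains);
                    pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
                                     (u64)le32_to_cpu(attr->stats_addr_high) << 32;
                    pi->stats_size = le32_to_cpu(attr->stats_size);
            }

            ph->xops->xfer_put(ph, t);              /* always release the xfer */
            return ret;
    }
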
245 for (domain = 0; domain < pinfo->num_domains; domain++) { in scmi_perf_xa_destroy()
246 xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx)); in scmi_perf_xa_destroy()
247 xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl)); in scmi_perf_xa_destroy()
262 ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES, in scmi_perf_domain_attributes_get()
263 sizeof(dom_info->id), sizeof(*attr), &t); in scmi_perf_domain_attributes_get()
267 put_unaligned_le32(dom_info->id, t->tx.buf); in scmi_perf_domain_attributes_get()
268 attr = t->rx.buf; in scmi_perf_domain_attributes_get()
270 ret = ph->xops->do_xfer(ph, t); in scmi_perf_domain_attributes_get()
272 flags = le32_to_cpu(attr->flags); in scmi_perf_domain_attributes_get()
274 dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); in scmi_perf_domain_attributes_get()
275 dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags); in scmi_perf_domain_attributes_get()
277 dom_info->perf_limit_notify = in scmi_perf_domain_attributes_get()
280 dom_info->perf_level_notify = in scmi_perf_domain_attributes_get()
282 dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags); in scmi_perf_domain_attributes_get()
284 dom_info->level_indexing_mode = in scmi_perf_domain_attributes_get()
286 dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) & in scmi_perf_domain_attributes_get()
288 dom_info->sustained_freq_khz = in scmi_perf_domain_attributes_get()
289 le32_to_cpu(attr->sustained_freq_khz); in scmi_perf_domain_attributes_get()
290 dom_info->sustained_perf_level = in scmi_perf_domain_attributes_get()
291 le32_to_cpu(attr->sustained_perf_level); in scmi_perf_domain_attributes_get()
296 if (!dom_info->sustained_freq_khz || in scmi_perf_domain_attributes_get()
297 !dom_info->sustained_perf_level || in scmi_perf_domain_attributes_get()
298 dom_info->level_indexing_mode) { in scmi_perf_domain_attributes_get()
300 dom_info->mult_factor = 1000; in scmi_perf_domain_attributes_get()
302 dom_info->mult_factor = in scmi_perf_domain_attributes_get()
303 (dom_info->sustained_freq_khz * 1000UL) in scmi_perf_domain_attributes_get()
304 / dom_info->sustained_perf_level; in scmi_perf_domain_attributes_get()
305 if ((dom_info->sustained_freq_khz * 1000UL) % in scmi_perf_domain_attributes_get()
306 dom_info->sustained_perf_level) in scmi_perf_domain_attributes_get()
307 dev_warn(ph->dev, in scmi_perf_domain_attributes_get()
309 dom_info->id); in scmi_perf_domain_attributes_get()
311 if (!dom_info->mult_factor) in scmi_perf_domain_attributes_get()
312 dev_warn(ph->dev, in scmi_perf_domain_attributes_get()
314 dom_info->id); in scmi_perf_domain_attributes_get()
316 strscpy(dom_info->info.name, attr->name, in scmi_perf_domain_attributes_get()
320 ph->xops->xfer_put(ph, t); in scmi_perf_domain_attributes_get()
328 ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, in scmi_perf_domain_attributes_get()
329 dom_info->id, NULL, dom_info->info.name, in scmi_perf_domain_attributes_get()
332 xa_init(&dom_info->opps_by_lvl); in scmi_perf_domain_attributes_get()
333 if (dom_info->level_indexing_mode) { in scmi_perf_domain_attributes_get()
334 xa_init(&dom_info->opps_by_idx); in scmi_perf_domain_attributes_get()
335 hash_init(dom_info->opps_by_freq); in scmi_perf_domain_attributes_get()
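
The last three matches above set up the per-domain OPP containers: an xarray keyed by performance level, plus, for level-indexing domains, a second xarray keyed by level index and a hashtable keyed by indicative frequency (the macro fragment at line 175 is part of that frequency lookup). A kernel-context sketch of the three lookup paths, assuming a struct scmi_opp with perf, level_index, indicative_freq and a hash node named hash, as the later matches suggest:

    static struct scmi_opp *opp_by_level(struct perf_dom_info *dom, u32 level)
    {
            return xa_load(&dom->opps_by_lvl, level);   /* keyed by perf level */
    }

    static struct scmi_opp *opp_by_index(struct perf_dom_info *dom, u32 index)
    {
            return xa_load(&dom->opps_by_idx, index);   /* keyed by level index */
    }

    static struct scmi_opp *opp_by_freq(struct perf_dom_info *dom, u32 freq_khz)
    {
            struct scmi_opp *opp;

            /* hashtable keyed on indicative_freq, cf. the fragment at line 175 */
            hash_for_each_possible(dom->opps_by_freq, opp, hash, freq_khz)
                    if (opp->indicative_freq == freq_khz)
                            return opp;
            return NULL;
    }
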
345 return t1->perf - t2->perf; in opp_cmp_func()
360 msg->domain = cpu_to_le32(p->perf_dom->id); in iter_perf_levels_prepare_message()
362 msg->level_index = cpu_to_le32(desc_index); in iter_perf_levels_prepare_message()
370 st->num_returned = le16_to_cpu(r->num_returned); in iter_perf_levels_update_state()
371 st->num_remaining = le16_to_cpu(r->num_remaining); in iter_perf_levels_update_state()
383 opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); in process_response_opp()
384 opp->power = le32_to_cpu(r->opp[loop_idx].power); in process_response_opp()
385 opp->trans_latency_us = in process_response_opp()
386 le16_to_cpu(r->opp[loop_idx].transition_latency_us); in process_response_opp()
388 ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); in process_response_opp()
390 dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n", in process_response_opp()
391 opp->perf, dom->info.name, ret); in process_response_opp()
405 opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); in process_response_opp_v4()
406 opp->power = le32_to_cpu(r->opp[loop_idx].power); in process_response_opp_v4()
407 opp->trans_latency_us = in process_response_opp_v4()
408 le16_to_cpu(r->opp[loop_idx].transition_latency_us); in process_response_opp_v4()
410 ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); in process_response_opp_v4()
412 dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n", in process_response_opp_v4()
413 opp->perf, dom->info.name, ret); in process_response_opp_v4()
417 /* Note that PERF v4 reports always five 32-bit words */ in process_response_opp_v4()
417 /* Note that PERF v4 always reports five 32-bit words */ in process_response_opp_v4()
418 opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); in process_response_opp_v4()
419 if (dom->level_indexing_mode) { in process_response_opp_v4()
420 opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index); in process_response_opp_v4()
422 ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp, in process_response_opp_v4()
426 "Failed to add opps_by_idx at %d for %s - ret:%d\n", in process_response_opp_v4()
427 opp->level_index, dom->info.name, ret); in process_response_opp_v4()
430 xa_erase(&dom->opps_by_lvl, opp->perf); in process_response_opp_v4()
435 hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); in process_response_opp_v4()
450 opp = &p->perf_dom->opp[p->perf_dom->opp_count]; in iter_perf_levels_process_response()
451 if (PROTOCOL_REV_MAJOR(p->version) <= 0x3) in iter_perf_levels_process_response()
452 ret = process_response_opp(ph->dev, p->perf_dom, opp, in iter_perf_levels_process_response()
453 st->loop_idx, response); in iter_perf_levels_process_response()
455 ret = process_response_opp_v4(ph->dev, p->perf_dom, opp, in iter_perf_levels_process_response()
456 st->loop_idx, response); in iter_perf_levels_process_response()
460 return ret == -EBUSY ? 0 : ret; in iter_perf_levels_process_response()
462 p->perf_dom->opp_count++; in iter_perf_levels_process_response()
464 dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n", in iter_perf_levels_process_response()
465 opp->perf, opp->power, opp->trans_latency_us, in iter_perf_levels_process_response()
466 opp->indicative_freq, opp->level_index); in iter_perf_levels_process_response()
487 iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, in scmi_perf_describe_levels_get()
494 ret = ph->hops->iter_response_run(iter); in scmi_perf_describe_levels_get()
498 if (perf_dom->opp_count) in scmi_perf_describe_levels_get()
499 sort(perf_dom->opp, perf_dom->opp_count, in scmi_perf_describe_levels_get()
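
Lines 345-499 above belong to the multi-part PERF_DESCRIBE_LEVELS enumeration: the iter_perf_levels_* callbacks fill the request, track num_returned/num_remaining, and store each returned OPP, while scmi_perf_describe_levels_get() only wires them into the common iterator helper and sorts the result. A kernel-context sketch of that wiring; struct names such as scmi_perf_ipriv and scmi_msg_perf_describe_levels are assumptions about the surrounding file, not shown in the matches:

    static int perf_levels_enumerate_sketch(const struct scmi_protocol_handle *ph,
                                            struct perf_dom_info *perf_dom,
                                            u32 version)
    {
            void *iter;
            struct scmi_iterator_ops ops = {
                    .prepare_message = iter_perf_levels_prepare_message,
                    .update_state = iter_perf_levels_update_state,
                    .process_response = iter_perf_levels_process_response,
            };
            struct scmi_perf_ipriv ppriv = {
                    .version = version,
                    .perf_dom = perf_dom,
            };

            /* MAX_OPPS bounds the total entries; the helper loops until
             * num_remaining drops to zero or the bound is hit. */
            iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
                                                PERF_DESCRIBE_LEVELS,
                                                sizeof(struct scmi_msg_perf_describe_levels),
                                                &ppriv);
            if (IS_ERR(iter))
                    return PTR_ERR(iter);

            return ph->hops->iter_response_run(iter);
    }
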
507 struct scmi_perf_info *pi = ph->get_priv(ph); in scmi_perf_num_domains_get()
509 return pi->num_domains; in scmi_perf_num_domains_get()
515 struct scmi_perf_info *pi = ph->get_priv(ph); in scmi_perf_domain_lookup()
517 if (domain >= pi->num_domains) in scmi_perf_domain_lookup()
518 return ERR_PTR(-EINVAL); in scmi_perf_domain_lookup()
520 return pi->dom_info + domain; in scmi_perf_domain_lookup()
530 return ERR_PTR(-EINVAL); in scmi_perf_info_get()
532 return &dom->info; in scmi_perf_info_get()
542 ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET, in scmi_perf_msg_limits_set()
547 limits = t->tx.buf; in scmi_perf_msg_limits_set()
548 limits->domain = cpu_to_le32(domain); in scmi_perf_msg_limits_set()
549 limits->max_level = cpu_to_le32(max_perf); in scmi_perf_msg_limits_set()
550 limits->min_level = cpu_to_le32(min_perf); in scmi_perf_msg_limits_set()
552 ret = ph->xops->do_xfer(ph, t); in scmi_perf_msg_limits_set()
554 ph->xops->xfer_put(ph, t); in scmi_perf_msg_limits_set()
562 if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { in __scmi_perf_limits_set()
563 struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; in __scmi_perf_limits_set()
566 dom->id, min_perf, max_perf); in __scmi_perf_limits_set()
567 iowrite32(max_perf, fci->set_addr); in __scmi_perf_limits_set()
568 iowrite32(min_perf, fci->set_addr + 4); in __scmi_perf_limits_set()
569 ph->hops->fastchannel_db_ring(fci->set_db); in __scmi_perf_limits_set()
573 return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf); in __scmi_perf_limits_set()
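
The limits-set path above tries a memory-mapped fast channel first and only falls back to a full SCMI message when none was described for the domain. The register layout implied by the matches (and by the size 8 passed to fastchannel_init() further down) is max level at offset 0 and min level at offset 4, followed by a doorbell ring. A condensed kernel-context sketch of that decision, assuming the same fc_info array and helper names:

    static int perf_limits_set_sketch(const struct scmi_protocol_handle *ph,
                                      struct perf_dom_info *dom,
                                      u32 max_perf, u32 min_perf)
    {
            /* Fast path: write max/min into the described fast channel. */
            if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
                    struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

                    iowrite32(max_perf, fci->set_addr);         /* offset 0 */
                    iowrite32(min_perf, fci->set_addr + 4);     /* offset 4 */
                    ph->hops->fastchannel_db_ring(fci->set_db);
                    return 0;
            }

            /* Slow path: fall back to a full PERF_LIMITS_SET message. */
            return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
    }
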
579 struct scmi_perf_info *pi = ph->get_priv(ph); in scmi_perf_limits_set()
586 if (!dom->set_limits) in scmi_perf_limits_set()
587 return -EOPNOTSUPP; in scmi_perf_limits_set()
589 if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) in scmi_perf_limits_set()
590 return -EINVAL; in scmi_perf_limits_set()
592 if (dom->level_indexing_mode) { in scmi_perf_limits_set()
596 opp = xa_load(&dom->opps_by_lvl, min_perf); in scmi_perf_limits_set()
598 return -EIO; in scmi_perf_limits_set()
600 min_perf = opp->level_index; in scmi_perf_limits_set()
604 opp = xa_load(&dom->opps_by_lvl, max_perf); in scmi_perf_limits_set()
606 return -EIO; in scmi_perf_limits_set()
608 max_perf = opp->level_index; in scmi_perf_limits_set()
622 ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET, in scmi_perf_msg_limits_get()
627 put_unaligned_le32(domain, t->tx.buf); in scmi_perf_msg_limits_get()
629 ret = ph->xops->do_xfer(ph, t); in scmi_perf_msg_limits_get()
631 limits = t->rx.buf; in scmi_perf_msg_limits_get()
633 *max_perf = le32_to_cpu(limits->max_level); in scmi_perf_msg_limits_get()
634 *min_perf = le32_to_cpu(limits->min_level); in scmi_perf_msg_limits_get()
637 ph->xops->xfer_put(ph, t); in scmi_perf_msg_limits_get()
645 if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { in __scmi_perf_limits_get()
646 struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; in __scmi_perf_limits_get()
648 *max_perf = ioread32(fci->get_addr); in __scmi_perf_limits_get()
649 *min_perf = ioread32(fci->get_addr + 4); in __scmi_perf_limits_get()
651 dom->id, *min_perf, *max_perf); in __scmi_perf_limits_get()
655 return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf); in __scmi_perf_limits_get()
672 if (dom->level_indexing_mode) { in scmi_perf_limits_get()
675 opp = xa_load(&dom->opps_by_idx, *min_perf); in scmi_perf_limits_get()
677 return -EIO; in scmi_perf_limits_get()
679 *min_perf = opp->perf; in scmi_perf_limits_get()
681 opp = xa_load(&dom->opps_by_idx, *max_perf); in scmi_perf_limits_get()
683 return -EIO; in scmi_perf_limits_get()
685 *max_perf = opp->perf; in scmi_perf_limits_get()
698 ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t); in scmi_perf_msg_level_set()
702 t->hdr.poll_completion = poll; in scmi_perf_msg_level_set()
703 lvl = t->tx.buf; in scmi_perf_msg_level_set()
704 lvl->domain = cpu_to_le32(domain); in scmi_perf_msg_level_set()
705 lvl->level = cpu_to_le32(level); in scmi_perf_msg_level_set()
707 ret = ph->xops->do_xfer(ph, t); in scmi_perf_msg_level_set()
709 ph->xops->xfer_put(ph, t); in scmi_perf_msg_level_set()
717 if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { in __scmi_perf_level_set()
718 struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; in __scmi_perf_level_set()
721 dom->id, level, 0); in __scmi_perf_level_set()
722 iowrite32(level, fci->set_addr); in __scmi_perf_level_set()
723 ph->hops->fastchannel_db_ring(fci->set_db); in __scmi_perf_level_set()
727 return scmi_perf_msg_level_set(ph, dom->id, level, poll); in __scmi_perf_level_set()
739 if (!dom->info.set_perf) in scmi_perf_level_set()
740 return -EOPNOTSUPP; in scmi_perf_level_set()
742 if (dom->level_indexing_mode) { in scmi_perf_level_set()
745 opp = xa_load(&dom->opps_by_lvl, level); in scmi_perf_level_set()
747 return -EIO; in scmi_perf_level_set()
749 level = opp->level_index; in scmi_perf_level_set()
761 ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET, in scmi_perf_msg_level_get()
766 t->hdr.poll_completion = poll; in scmi_perf_msg_level_get()
767 put_unaligned_le32(domain, t->tx.buf); in scmi_perf_msg_level_get()
769 ret = ph->xops->do_xfer(ph, t); in scmi_perf_msg_level_get()
771 *level = get_unaligned_le32(t->rx.buf); in scmi_perf_msg_level_get()
773 ph->xops->xfer_put(ph, t); in scmi_perf_msg_level_get()
781 if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { in __scmi_perf_level_get()
782 *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); in __scmi_perf_level_get()
784 dom->id, *level, 0); in __scmi_perf_level_get()
788 return scmi_perf_msg_level_get(ph, dom->id, level, poll); in __scmi_perf_level_get()
805 if (dom->level_indexing_mode) { in scmi_perf_level_get()
808 opp = xa_load(&dom->opps_by_idx, *level); in scmi_perf_level_get()
810 return -EIO; in scmi_perf_level_get()
812 *level = opp->perf; in scmi_perf_level_get()
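
For level-indexing domains, every set path above translates the OS-visible performance level into the firmware's level index before sending, and every get path translates the returned index back into a level. A kernel-context sketch of those two translations, using the xarrays initialised in scmi_perf_domain_attributes_get():

    /* set path: OS perf level --opps_by_lvl--> firmware level index */
    static int level_to_index(struct perf_dom_info *dom, u32 level, u32 *index)
    {
            struct scmi_opp *opp = xa_load(&dom->opps_by_lvl, level);

            if (!opp)
                    return -EIO;
            *index = opp->level_index;
            return 0;
    }

    /* get path: firmware level index --opps_by_idx--> OS perf level */
    static int index_to_level(struct perf_dom_info *dom, u32 index, u32 *level)
    {
            struct scmi_opp *opp = xa_load(&dom->opps_by_idx, index);

            if (!opp)
                    return -EIO;
            *level = opp->perf;
            return 0;
    }
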
826 ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); in scmi_perf_level_limits_notify()
830 notify = t->tx.buf; in scmi_perf_level_limits_notify()
831 notify->domain = cpu_to_le32(domain); in scmi_perf_level_limits_notify()
832 notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; in scmi_perf_level_limits_notify()
834 ret = ph->xops->do_xfer(ph, t); in scmi_perf_level_limits_notify()
836 ph->xops->xfer_put(ph, t); in scmi_perf_level_limits_notify()
845 fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); in scmi_perf_domain_init_fc()
849 ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, in scmi_perf_domain_init_fc()
850 PERF_LEVEL_GET, 4, dom->id, in scmi_perf_domain_init_fc()
854 ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, in scmi_perf_domain_init_fc()
855 PERF_LIMITS_GET, 8, dom->id, in scmi_perf_domain_init_fc()
859 if (dom->info.set_perf) in scmi_perf_domain_init_fc()
860 ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, in scmi_perf_domain_init_fc()
861 PERF_LEVEL_SET, 4, dom->id, in scmi_perf_domain_init_fc()
866 if (dom->set_limits) in scmi_perf_domain_init_fc()
867 ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, in scmi_perf_domain_init_fc()
868 PERF_LIMITS_SET, 8, dom->id, in scmi_perf_domain_init_fc()
873 dom->fc_info = fc; in scmi_perf_domain_init_fc()
880 unsigned long freq; in scmi_dvfs_device_opps_add() local
888 for (idx = 0; idx < dom->opp_count; idx++) { in scmi_dvfs_device_opps_add()
889 if (!dom->level_indexing_mode) in scmi_dvfs_device_opps_add()
890 freq = dom->opp[idx].perf * dom->mult_factor; in scmi_dvfs_device_opps_add()
892 freq = dom->opp[idx].indicative_freq * dom->mult_factor; in scmi_dvfs_device_opps_add()
895 data.turbo = freq > dom->sustained_freq_khz * 1000; in scmi_dvfs_device_opps_add()
897 data.level = dom->opp[idx].perf; in scmi_dvfs_device_opps_add()
898 data.freq = freq; in scmi_dvfs_device_opps_add()
903 domain, dom->info.name, idx, freq); in scmi_dvfs_device_opps_add()
909 domain, dom->info.name, idx, freq); in scmi_dvfs_device_opps_add()
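
The frequency registered for each OPP above is perf * mult_factor for ordinary domains and indicative_freq * mult_factor for level-indexing ones, with mult_factor computed in scmi_perf_domain_attributes_get() (lines 296-307). A small standalone worked example with hypothetical numbers:

    #include <stdio.h>

    /* Worked example (hypothetical values): how mult_factor maps a
     * performance level to a frequency in Hz, mirroring the logic above. */
    int main(void)
    {
            unsigned long sustained_freq_khz = 2000000;   /* 2 GHz, hypothetical */
            unsigned long sustained_perf_level = 200;     /* hypothetical */
            unsigned long mult_factor;

            /* Non-level-indexed domains: Hz per unit of performance level. */
            mult_factor = (sustained_freq_khz * 1000UL) / sustained_perf_level;
            printf("mult_factor = %lu Hz/level\n", mult_factor);   /* 10000000 */

            /* An OPP reported at perf level 150 then registers at: */
            printf("freq = %lu Hz\n", 150 * mult_factor);          /* 1.5 GHz */

            /* Level-indexed domains instead use mult_factor = 1000, so
             * indicative_freq (kHz) * 1000 gives Hz directly. */
            return 0;
    }
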
925 return dom->opp[dom->opp_count - 1].trans_latency_us * 1000; in scmi_dvfs_transition_latency_get()
935 return -EINVAL; in scmi_dvfs_rate_limit_get()
941 *rate_limit = dom->rate_limit_us; in scmi_dvfs_rate_limit_get()
946 unsigned long freq, bool poll) in scmi_dvfs_freq_set() argument
955 if (!dom->level_indexing_mode) { in scmi_dvfs_freq_set()
956 level = freq / dom->mult_factor; in scmi_dvfs_freq_set()
960 opp = LOOKUP_BY_FREQ(dom->opps_by_freq, in scmi_dvfs_freq_set()
961 freq / dom->mult_factor); in scmi_dvfs_freq_set()
963 return -EIO; in scmi_dvfs_freq_set()
965 level = opp->level_index; in scmi_dvfs_freq_set()
972 unsigned long *freq, bool poll) in scmi_dvfs_freq_get() argument
986 if (!dom->level_indexing_mode) { in scmi_dvfs_freq_get()
987 *freq = level * dom->mult_factor; in scmi_dvfs_freq_get()
991 opp = xa_load(&dom->opps_by_idx, level); in scmi_dvfs_freq_get()
993 return -EIO; in scmi_dvfs_freq_get()
995 *freq = opp->indicative_freq * dom->mult_factor; in scmi_dvfs_freq_get()
1002 u32 domain, unsigned long *freq, in scmi_dvfs_est_power_get() argument
1007 int idx, ret = -EINVAL; in scmi_dvfs_est_power_get()
1014 for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { in scmi_dvfs_est_power_get()
1015 if (!dom->level_indexing_mode) in scmi_dvfs_est_power_get()
1016 opp_freq = opp->perf * dom->mult_factor; in scmi_dvfs_est_power_get()
1018 opp_freq = opp->indicative_freq * dom->mult_factor; in scmi_dvfs_est_power_get()
1020 if (opp_freq < *freq) in scmi_dvfs_est_power_get()
1023 *freq = opp_freq; in scmi_dvfs_est_power_get()
1024 *power = opp->power; in scmi_dvfs_est_power_get()
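
scmi_dvfs_est_power_get() walks the OPP table, which was sorted ascending after enumeration, and picks the first entry whose frequency is not below the request, writing back both that rounded-up frequency and its power. A standalone example of the same rounding behaviour with a hypothetical table:

    #include <stdio.h>

    struct opp { unsigned long freq_hz; unsigned long power; };

    /* Hypothetical table, ascending by frequency as after the sort() above. */
    static const struct opp opps[] = {
            {  500000000,  90 }, { 1000000000, 180 }, { 1500000000, 310 },
    };

    /* Mirrors the est_power loop: round the request up to the next OPP and
     * return that OPP's frequency and power. */
    static int est_power(unsigned long *freq, unsigned long *power)
    {
            for (unsigned int i = 0; i < sizeof(opps) / sizeof(opps[0]); i++) {
                    if (opps[i].freq_hz < *freq)
                            continue;
                    *freq = opps[i].freq_hz;
                    *power = opps[i].power;
                    return 0;
            }
            return -1;      /* request above the highest OPP */
    }

    int main(void)
    {
            unsigned long f = 700000000, p = 0;

            if (!est_power(&f, &p))
                    printf("rounded to %lu Hz, power %lu\n", f, p); /* 1 GHz, 180 */
            return 0;
    }
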
1041 return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; in scmi_fast_switch_possible()
1050 return -EINVAL; in scmi_fast_switch_rate_limit()
1056 if (!dom->fc_info) in scmi_fast_switch_rate_limit()
1057 return -EINVAL; in scmi_fast_switch_rate_limit()
1059 *rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit; in scmi_fast_switch_rate_limit()
1066 struct scmi_perf_info *pi = ph->get_priv(ph); in scmi_power_scale_get()
1068 return pi->power_scale; in scmi_power_scale_get()
1103 supported = dom->perf_limit_notify; in scmi_perf_notify_supported()
1105 supported = dom->perf_level_notify; in scmi_perf_notify_supported()
1116 return -EINVAL; in scmi_perf_set_notify_enabled()
1121 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", in scmi_perf_set_notify_enabled()
1129 unsigned int index, unsigned long *freq) in scmi_perf_xlate_opp_to_freq() argument
1133 if (!dom || !freq) in scmi_perf_xlate_opp_to_freq()
1134 return -EINVAL; in scmi_perf_xlate_opp_to_freq()
1136 if (!dom->level_indexing_mode) { in scmi_perf_xlate_opp_to_freq()
1137 opp = xa_load(&dom->opps_by_lvl, index); in scmi_perf_xlate_opp_to_freq()
1139 return -ENODEV; in scmi_perf_xlate_opp_to_freq()
1141 *freq = opp->perf * dom->mult_factor; in scmi_perf_xlate_opp_to_freq()
1143 opp = xa_load(&dom->opps_by_idx, index); in scmi_perf_xlate_opp_to_freq()
1145 return -ENODEV; in scmi_perf_xlate_opp_to_freq()
1147 *freq = opp->indicative_freq * dom->mult_factor; in scmi_perf_xlate_opp_to_freq()
1172 r->timestamp = timestamp; in scmi_perf_fill_custom_report()
1173 r->agent_id = le32_to_cpu(p->agent_id); in scmi_perf_fill_custom_report()
1174 r->domain_id = le32_to_cpu(p->domain_id); in scmi_perf_fill_custom_report()
1175 r->range_max = le32_to_cpu(p->range_max); in scmi_perf_fill_custom_report()
1176 r->range_min = le32_to_cpu(p->range_min); in scmi_perf_fill_custom_report()
1178 dom = scmi_perf_domain_lookup(ph, r->domain_id); in scmi_perf_fill_custom_report()
1185 *src_id = r->domain_id; in scmi_perf_fill_custom_report()
1188 ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max); in scmi_perf_fill_custom_report()
1192 ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min); in scmi_perf_fill_custom_report()
1197 r->range_max_freq = freq_max; in scmi_perf_fill_custom_report()
1198 r->range_min_freq = freq_min; in scmi_perf_fill_custom_report()
1206 unsigned long freq; in scmi_perf_fill_custom_report() local
1211 r->timestamp = timestamp; in scmi_perf_fill_custom_report()
1212 r->agent_id = le32_to_cpu(p->agent_id); in scmi_perf_fill_custom_report()
1213 r->domain_id = le32_to_cpu(p->domain_id); in scmi_perf_fill_custom_report()
1215 r->performance_level = le32_to_cpu(p->performance_level); in scmi_perf_fill_custom_report()
1217 dom = scmi_perf_domain_lookup(ph, r->domain_id); in scmi_perf_fill_custom_report()
1224 *src_id = r->domain_id; in scmi_perf_fill_custom_report()
1228 ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level, in scmi_perf_fill_custom_report()
1229 &freq); in scmi_perf_fill_custom_report()
1233 r->performance_level_freq = freq; in scmi_perf_fill_custom_report()
1246 struct scmi_perf_info *pi = ph->get_priv(ph); in scmi_perf_get_num_sources()
1249 return -EINVAL; in scmi_perf_get_num_sources()
1251 return pi->num_domains; in scmi_perf_get_num_sources()
1287 ret = ph->xops->version_get(ph, &version); in scmi_perf_protocol_init()
1291 dev_dbg(ph->dev, "Performance Version %d.%d\n", in scmi_perf_protocol_init()
1294 pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); in scmi_perf_protocol_init()
1296 return -ENOMEM; in scmi_perf_protocol_init()
1298 pinfo->version = version; in scmi_perf_protocol_init()
1304 pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, in scmi_perf_protocol_init()
1305 sizeof(*pinfo->dom_info), GFP_KERNEL); in scmi_perf_protocol_init()
1306 if (!pinfo->dom_info) in scmi_perf_protocol_init()
1307 return -ENOMEM; in scmi_perf_protocol_init()
1309 for (domain = 0; domain < pinfo->num_domains; domain++) { in scmi_perf_protocol_init()
1310 struct perf_dom_info *dom = pinfo->dom_info + domain; in scmi_perf_protocol_init()
1312 dom->id = domain; in scmi_perf_protocol_init()
1313 scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd, in scmi_perf_protocol_init()
1314 pinfo->notify_lvl_cmd, version); in scmi_perf_protocol_init()
1317 if (dom->perf_fastchannels) in scmi_perf_protocol_init()
1321 ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo); in scmi_perf_protocol_init()
1325 return ph->set_priv(ph, pinfo, version); in scmi_perf_protocol_init()