Lines Matching "rx-num-evt" (+full:rx +full:- +full:num +full:- +full:evt); the matches below are from drivers/firmware/arm_scmi/clock.c, the SCMI clock protocol driver
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2018-2022 ARM Ltd.
185 if (clk_id >= ci->num_clocks) in scmi_clock_domain_lookup()
186 return ERR_PTR(-EINVAL); in scmi_clock_domain_lookup()
188 return ci->clk + clk_id; in scmi_clock_domain_lookup()
199 ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, in scmi_clock_protocol_attributes_get()
204 attr = t->rx.buf; in scmi_clock_protocol_attributes_get()
206 ret = ph->xops->do_xfer(ph, t); in scmi_clock_protocol_attributes_get()
208 ci->num_clocks = le16_to_cpu(attr->num_clocks); in scmi_clock_protocol_attributes_get()
209 ci->max_async_req = attr->max_async_req; in scmi_clock_protocol_attributes_get()
212 ph->xops->xfer_put(ph, t); in scmi_clock_protocol_attributes_get()
215 if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL)) in scmi_clock_protocol_attributes_get()
216 ci->notify_rate_changed_cmd = true; in scmi_clock_protocol_attributes_get()
218 if (!ph->hops->protocol_msg_check(ph, in scmi_clock_protocol_attributes_get()
221 ci->notify_rate_change_requested_cmd = true; in scmi_clock_protocol_attributes_get()
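A sketch of the PROTOCOL_ATTRIBUTES reply that the accessors above imply; the field names and the trailing pad byte are assumptions based on the le16/byte accesses in this listing, not a quote of the driver's message definitions:

#include <linux/types.h>

/* Assumed wire layout of the CLOCK PROTOCOL_ATTRIBUTES response:
 * num_clocks is read with le16_to_cpu() and max_async_req as a plain
 * byte above, so a packed 32-bit word of the form below is implied.
 */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;	/* number of clock domains exposed */
	u8 max_async_req;	/* max in-flight async CLOCK_RATE_SET requests */
	u8 reserved;		/* assumed padding */
};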
239 msg->id = cpu_to_le32(p->clk_id); in iter_clk_possible_parents_prepare_message()
241 msg->skip_parents = cpu_to_le32(desc_index); in iter_clk_possible_parents_prepare_message()
249 struct device *dev = ((struct scmi_clk_ipriv *)p)->dev; in iter_clk_possible_parents_update_state()
252 flags = le32_to_cpu(r->num_parent_flags); in iter_clk_possible_parents_update_state()
253 st->num_returned = NUM_PARENTS_RETURNED(flags); in iter_clk_possible_parents_update_state()
254 st->num_remaining = NUM_PARENTS_REMAINING(flags); in iter_clk_possible_parents_update_state()
257 * num parents is not declared previously anywhere so we in iter_clk_possible_parents_update_state()
260 if (!st->max_resources) { in iter_clk_possible_parents_update_state()
261 p->clk->num_parents = st->num_returned + st->num_remaining; in iter_clk_possible_parents_update_state()
262 p->clk->parents = devm_kcalloc(dev, p->clk->num_parents, in iter_clk_possible_parents_update_state()
263 sizeof(*p->clk->parents), in iter_clk_possible_parents_update_state()
265 if (!p->clk->parents) { in iter_clk_possible_parents_update_state()
266 p->clk->num_parents = 0; in iter_clk_possible_parents_update_state()
267 return -ENOMEM; in iter_clk_possible_parents_update_state()
269 st->max_resources = st->num_returned + st->num_remaining; in iter_clk_possible_parents_update_state()
283 u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx]; in iter_clk_possible_parents_process_response()
285 *parent = le32_to_cpu(r->possible_parents[st->loop_idx]); in iter_clk_possible_parents_process_response()
302 .dev = ph->dev, in scmi_clock_possible_parents()
307 iter = ph->hops->iter_response_init(ph, &ops, 0, in scmi_clock_possible_parents()
314 ret = ph->hops->iter_response_run(iter); in scmi_clock_possible_parents()
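The possible-parents iterator above sends a (clock id, skip count) pair and parses a flags word followed by parent ids. A minimal sketch of the assumed message layouts; the flexible array and field names follow the accesses shown in this listing:

#include <linux/types.h>

/* Assumed CLOCK_POSSIBLE_PARENTS_GET request: which clock, plus how
 * many parent entries to skip on this iteration (desc_index above).
 */
struct scmi_msg_clock_possible_parents {
	__le32 id;
	__le32 skip_parents;
};

/* Assumed reply: num_parent_flags carries the returned/remaining
 * counts decoded by NUM_PARENTS_RETURNED()/NUM_PARENTS_REMAINING(),
 * followed by that many little-endian parent identifiers.
 */
struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;
	__le32 possible_parents[];
};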
327 ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS, in scmi_clock_get_permissions()
332 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_get_permissions()
334 ret = ph->xops->do_xfer(ph, t); in scmi_clock_get_permissions()
336 perm = get_unaligned_le32(t->rx.buf); in scmi_clock_get_permissions()
338 clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED); in scmi_clock_get_permissions()
339 clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED); in scmi_clock_get_permissions()
340 clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED); in scmi_clock_get_permissions()
343 ph->xops->xfer_put(ph, t); in scmi_clock_get_permissions()
356 struct scmi_clock_info *clk = cinfo->clk + clk_id; in scmi_clock_attributes_get()
358 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, in scmi_clock_attributes_get()
363 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_attributes_get()
364 attr = t->rx.buf; in scmi_clock_attributes_get()
366 ret = ph->xops->do_xfer(ph, t); in scmi_clock_attributes_get()
370 attributes = le32_to_cpu(attr->attributes); in scmi_clock_attributes_get()
371 strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); in scmi_clock_attributes_get()
374 latency = le32_to_cpu(attr->clock_enable_latency); in scmi_clock_attributes_get()
375 clk->enable_latency = latency ? : U32_MAX; in scmi_clock_attributes_get()
378 ph->xops->xfer_put(ph, t); in scmi_clock_attributes_get()
386 ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id, in scmi_clock_attributes_get()
387 NULL, clk->name, in scmi_clock_attributes_get()
390 if (cinfo->notify_rate_changed_cmd && in scmi_clock_attributes_get()
392 clk->rate_changed_notifications = true; in scmi_clock_attributes_get()
393 if (cinfo->notify_rate_change_requested_cmd && in scmi_clock_attributes_get()
395 clk->rate_change_requested_notifications = true; in scmi_clock_attributes_get()
402 clk->extended_config = true; in scmi_clock_attributes_get()
414 return -1; in rate_cmp_func()
428 msg->id = cpu_to_le32(p->clk_id); in iter_clk_describe_prepare_message()
430 msg->rate_index = cpu_to_le32(desc_index); in iter_clk_describe_prepare_message()
439 if (st->num_returned != 3 && st->num_remaining == 0 && \
440 st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
441 st->num_returned = 3; \
442 st->num_remaining = 0; \
444 dev_err(p->dev, \
445 "Cannot fix out-of-spec reply !\n"); \
446 return -EPROTO; \
458 flags = le32_to_cpu(r->num_rates_flags); in iter_clk_describe_update_state()
459 st->num_remaining = NUM_REMAINING(flags); in iter_clk_describe_update_state()
460 st->num_returned = NUM_RETURNED(flags); in iter_clk_describe_update_state()
461 p->clk->rate_discrete = RATE_DISCRETE(flags); in iter_clk_describe_update_state()
464 if (!p->clk->rate_discrete && in iter_clk_describe_update_state()
465 (st->num_returned != 3 || st->num_remaining != 0)) { in iter_clk_describe_update_state()
466 dev_warn(p->dev, in iter_clk_describe_update_state()
467 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", in iter_clk_describe_update_state()
468 p->clk->name, st->num_returned, st->num_remaining, in iter_clk_describe_update_state()
469 st->rx_len); in iter_clk_describe_update_state()
487 if (!p->clk->rate_discrete) { in iter_clk_describe_process_response()
488 switch (st->desc_index + st->loop_idx) { in iter_clk_describe_process_response()
490 p->clk->range.min_rate = RATE_TO_U64(r->rate[0]); in iter_clk_describe_process_response()
493 p->clk->range.max_rate = RATE_TO_U64(r->rate[1]); in iter_clk_describe_process_response()
496 p->clk->range.step_size = RATE_TO_U64(r->rate[2]); in iter_clk_describe_process_response()
499 ret = -EINVAL; in iter_clk_describe_process_response()
503 u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx]; in iter_clk_describe_process_response()
505 *rate = RATE_TO_U64(r->rate[st->loop_idx]); in iter_clk_describe_process_response()
506 p->clk->list.num_rates++; in iter_clk_describe_process_response()
526 .dev = ph->dev, in scmi_clock_describe_rates_get()
529 iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, in scmi_clock_describe_rates_get()
536 ret = ph->hops->iter_response_run(iter); in scmi_clock_describe_rates_get()
540 if (!clk->rate_discrete) { in scmi_clock_describe_rates_get()
541 dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", in scmi_clock_describe_rates_get()
542 clk->range.min_rate, clk->range.max_rate, in scmi_clock_describe_rates_get()
543 clk->range.step_size); in scmi_clock_describe_rates_get()
544 } else if (clk->list.num_rates) { in scmi_clock_describe_rates_get()
545 sort(clk->list.rates, clk->list.num_rates, in scmi_clock_describe_rates_get()
546 sizeof(clk->list.rates[0]), rate_cmp_func, NULL); in scmi_clock_describe_rates_get()
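The describe-rates path treats every rate as a pair of 32-bit words (hence the sizeof(__le32) * 2 * 3 check in the out-of-spec triplet workaround above). A sketch of the assumed reply layout and a RATE_TO_U64() helper consistent with how it is used here:

#include <linux/types.h>

/* Assumed CLOCK_DESCRIBE_RATES reply: a flags word holding the
 * returned/remaining counts and the discrete-vs-range bit, followed by
 * rate entries split into low/high little-endian halves.
 */
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
	struct {
		__le32 value_low;	/* rate bits 31:0 */
		__le32 value_high;	/* rate bits 63:32 */
	} rate[];
};

/* Assumed helper matching the RATE_TO_U64() calls above: recombine the
 * two halves into a 64-bit rate in Hz (le32_to_cpu() comes from the
 * kernel byteorder helpers).
 */
#define RATE_TO_U64(X)						\
({								\
	typeof(X) x = (X);					\
	le32_to_cpu((x).value_low) |				\
		(u64)le32_to_cpu((x).value_high) << 32;		\
})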
559 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET, in scmi_clock_rate_get()
564 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_rate_get()
566 ret = ph->xops->do_xfer(ph, t); in scmi_clock_rate_get()
568 *value = get_unaligned_le64(t->rx.buf); in scmi_clock_rate_get()
570 ph->xops->xfer_put(ph, t); in scmi_clock_rate_get()
581 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_rate_set()
588 if (clk->rate_ctrl_forbidden) in scmi_clock_rate_set()
589 return -EACCES; in scmi_clock_rate_set()
591 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t); in scmi_clock_rate_set()
595 if (ci->max_async_req && in scmi_clock_rate_set()
596 atomic_inc_return(&ci->cur_async_req) < ci->max_async_req) in scmi_clock_rate_set()
599 cfg = t->tx.buf; in scmi_clock_rate_set()
600 cfg->flags = cpu_to_le32(flags); in scmi_clock_rate_set()
601 cfg->id = cpu_to_le32(clk_id); in scmi_clock_rate_set()
602 cfg->value_low = cpu_to_le32(rate & 0xffffffff); in scmi_clock_rate_set()
603 cfg->value_high = cpu_to_le32(rate >> 32); in scmi_clock_rate_set()
606 ret = ph->xops->do_xfer_with_response(ph, t); in scmi_clock_rate_set()
610 resp = t->rx.buf; in scmi_clock_rate_set()
611 if (le32_to_cpu(resp->id) == clk_id) in scmi_clock_rate_set()
612 dev_dbg(ph->dev, in scmi_clock_rate_set()
614 get_unaligned_le64(&resp->rate_low)); in scmi_clock_rate_set()
616 ret = -EPROTO; in scmi_clock_rate_set()
619 ret = ph->xops->do_xfer(ph, t); in scmi_clock_rate_set()
622 if (ci->max_async_req) in scmi_clock_rate_set()
623 atomic_dec(&ci->cur_async_req); in scmi_clock_rate_set()
625 ph->xops->xfer_put(ph, t); in scmi_clock_rate_set()
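CLOCK_RATE_SET builds a four-word request: a flags word (the async bit among them), the clock id, and the 64-bit rate split into two halves, as the assignments above show. A sketch of the assumed TX layout:

#include <linux/types.h>

/* Assumed CLOCK_RATE_SET request filled in above. When the async flag
 * is set (and the platform's max_async_req budget allows it), the rate
 * change completes through a delayed response carrying the clock id
 * and the achieved rate, as parsed in the async branch above.
 */
struct scmi_clock_set_rate {
	__le32 flags;		/* e.g. the async-request bit */
	__le32 id;		/* clock domain identifier */
	__le32 value_low;	/* rate bits 31:0 */
	__le32 value_high;	/* rate bits 63:32 */
};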
640 return -EINVAL; in scmi_clock_config_set()
642 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, in scmi_clock_config_set()
647 t->hdr.poll_completion = atomic; in scmi_clock_config_set()
649 cfg = t->tx.buf; in scmi_clock_config_set()
650 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_set()
651 cfg->attributes = cpu_to_le32(state); in scmi_clock_config_set()
653 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_set()
655 ph->xops->xfer_put(ph, t); in scmi_clock_config_set()
666 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_set_parent()
673 if (parent_id >= clk->num_parents) in scmi_clock_set_parent()
674 return -EINVAL; in scmi_clock_set_parent()
676 if (clk->parent_ctrl_forbidden) in scmi_clock_set_parent()
677 return -EACCES; in scmi_clock_set_parent()
679 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET, in scmi_clock_set_parent()
684 t->hdr.poll_completion = false; in scmi_clock_set_parent()
686 cfg = t->tx.buf; in scmi_clock_set_parent()
687 cfg->id = cpu_to_le32(clk_id); in scmi_clock_set_parent()
688 cfg->parent_id = cpu_to_le32(clk->parents[parent_id]); in scmi_clock_set_parent()
690 ret = ph->xops->do_xfer(ph, t); in scmi_clock_set_parent()
692 ph->xops->xfer_put(ph, t); in scmi_clock_set_parent()
704 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET, in scmi_clock_get_parent()
709 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_get_parent()
711 ret = ph->xops->do_xfer(ph, t); in scmi_clock_get_parent()
713 *parent_id = get_unaligned_le32(t->rx.buf); in scmi_clock_get_parent()
715 ph->xops->xfer_put(ph, t); in scmi_clock_get_parent()
733 return -EINVAL; in scmi_clock_config_set_v2()
735 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, in scmi_clock_config_set_v2()
740 t->hdr.poll_completion = atomic; in scmi_clock_config_set_v2()
745 cfg = t->tx.buf; in scmi_clock_config_set_v2()
746 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_set_v2()
747 cfg->attributes = cpu_to_le32(attrs); in scmi_clock_config_set_v2()
749 cfg->oem_config_val = cpu_to_le32(0); in scmi_clock_config_set_v2()
751 cfg->oem_config_val = cpu_to_le32(oem_val); in scmi_clock_config_set_v2()
753 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_set_v2()
755 ph->xops->xfer_put(ph, t); in scmi_clock_config_set_v2()
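The v2 CONFIG_SET path extends the older id/attributes pair with an OEM-specific value, only meaningful when an OEM type is passed (the conditional cpu_to_le32(0) vs cpu_to_le32(oem_val) above). A sketch of the assumed request layout:

#include <linux/types.h>

/* Assumed v2 CLOCK_CONFIG_SET request: the attributes word encodes the
 * requested state (and OEM type when used); oem_config_val is ignored
 * by the platform unless an OEM type is given.
 */
struct scmi_msg_clock_config_set_v2 {
	__le32 id;
	__le32 attributes;
	__le32 oem_config_val;
};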
762 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_enable()
769 if (clk->state_ctrl_forbidden) in scmi_clock_enable()
770 return -EACCES; in scmi_clock_enable()
772 return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE, in scmi_clock_enable()
779 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_disable()
786 if (clk->state_ctrl_forbidden) in scmi_clock_disable()
787 return -EACCES; in scmi_clock_disable()
789 return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE, in scmi_clock_disable()
804 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET, in scmi_clock_config_get_v2()
809 t->hdr.poll_completion = atomic; in scmi_clock_config_get_v2()
813 cfg = t->tx.buf; in scmi_clock_config_get_v2()
814 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_get_v2()
815 cfg->flags = cpu_to_le32(flags); in scmi_clock_config_get_v2()
817 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_get_v2()
819 struct scmi_msg_resp_clock_config_get *resp = t->rx.buf; in scmi_clock_config_get_v2()
822 *attributes = le32_to_cpu(resp->attributes); in scmi_clock_config_get_v2()
825 *enabled = IS_CLK_ENABLED(resp->config); in scmi_clock_config_get_v2()
828 *oem_val = le32_to_cpu(resp->oem_config_val); in scmi_clock_config_get_v2()
831 ph->xops->xfer_put(ph, t); in scmi_clock_config_get_v2()
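scmi_clock_config_get_v2() above reads back three words: attributes, a config word whose enabled bit is tested with IS_CLK_ENABLED(), and the OEM value. A sketch of the assumed reply layout:

#include <linux/types.h>

/* Assumed v2 CLOCK_CONFIG_GET reply parsed above. */
struct scmi_msg_resp_clock_config_get {
	__le32 attributes;	/* copied out when *attributes is requested */
	__le32 config;		/* enabled state probed via IS_CLK_ENABLED() */
	__le32 oem_config_val;	/* OEM-specific value, when requested */
};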
846 return -EINVAL; in scmi_clock_config_get()
848 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, in scmi_clock_config_get()
853 t->hdr.poll_completion = atomic; in scmi_clock_config_get()
854 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_config_get()
855 resp = t->rx.buf; in scmi_clock_config_get()
857 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_get()
859 *enabled = IS_CLK_ENABLED(resp->attributes); in scmi_clock_config_get()
861 ph->xops->xfer_put(ph, t); in scmi_clock_config_get()
869 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_state_get()
871 return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL, in scmi_clock_state_get()
880 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_config_oem_set()
887 if (!clk->extended_config) in scmi_clock_config_oem_set()
888 return -EOPNOTSUPP; in scmi_clock_config_oem_set()
890 return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED, in scmi_clock_config_oem_set()
899 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_config_oem_get()
906 if (!clk->extended_config) in scmi_clock_config_oem_get()
907 return -EOPNOTSUPP; in scmi_clock_config_oem_get()
909 return ci->clock_config_get(ph, clk_id, oem_type, attributes, in scmi_clock_config_oem_get()
915 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_count_get()
917 return ci->num_clocks; in scmi_clock_count_get()
924 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_info_get()
930 if (!clk->name[0]) in scmi_clock_info_get()
955 struct clock_info *ci = ph->get_priv(ph); in scmi_clk_notify_supported()
965 supported = clk->rate_changed_notifications; in scmi_clk_notify_supported()
967 supported = clk->rate_change_requested_notifications; in scmi_clk_notify_supported()
979 ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); in scmi_clk_rate_notify()
983 notify = t->tx.buf; in scmi_clk_rate_notify()
984 notify->clk_id = cpu_to_le32(clk_id); in scmi_clk_rate_notify()
985 notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; in scmi_clk_rate_notify()
987 ret = ph->xops->do_xfer(ph, t); in scmi_clk_rate_notify()
989 ph->xops->xfer_put(ph, t); in scmi_clk_rate_notify()
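Both notification enables go through the same two-word request: the clock id and an enable word whose bit 0 is set or cleared, as built above. A sketch of the assumed layout:

#include <linux/types.h>

/* Assumed CLOCK_RATE_NOTIFY / CLOCK_RATE_CHANGE_REQUESTED_NOTIFY request. */
struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;	/* BIT(0) set to enable, 0 to disable */
};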
999 return -EINVAL; in scmi_clk_set_notify_enabled()
1004 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", in scmi_clk_set_notify_enabled()
1023 r->timestamp = timestamp; in scmi_clk_fill_custom_report()
1024 r->agent_id = le32_to_cpu(p->agent_id); in scmi_clk_fill_custom_report()
1025 r->clock_id = le32_to_cpu(p->clock_id); in scmi_clk_fill_custom_report()
1026 r->rate = get_unaligned_le64(&p->rate_low); in scmi_clk_fill_custom_report()
1027 *src_id = r->clock_id; in scmi_clk_fill_custom_report()
1034 struct clock_info *ci = ph->get_priv(ph); in scmi_clk_get_num_sources()
1037 return -EINVAL; in scmi_clk_get_num_sources()
1039 return ci->num_clocks; in scmi_clk_get_num_sources()
1075 ret = ph->xops->version_get(ph, &version); in scmi_clock_protocol_init()
1079 dev_dbg(ph->dev, "Clock Version %d.%d\n", in scmi_clock_protocol_init()
1082 cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_clock_protocol_init()
1084 return -ENOMEM; in scmi_clock_protocol_init()
1090 cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks, in scmi_clock_protocol_init()
1091 sizeof(*cinfo->clk), GFP_KERNEL); in scmi_clock_protocol_init()
1092 if (!cinfo->clk) in scmi_clock_protocol_init()
1093 return -ENOMEM; in scmi_clock_protocol_init()
1095 for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { in scmi_clock_protocol_init()
1096 struct scmi_clock_info *clk = cinfo->clk + clkid; in scmi_clock_protocol_init()
1104 cinfo->clock_config_set = scmi_clock_config_set_v2; in scmi_clock_protocol_init()
1105 cinfo->clock_config_get = scmi_clock_config_get_v2; in scmi_clock_protocol_init()
1107 cinfo->clock_config_set = scmi_clock_config_set; in scmi_clock_protocol_init()
1108 cinfo->clock_config_get = scmi_clock_config_get; in scmi_clock_protocol_init()
1111 cinfo->version = version; in scmi_clock_protocol_init()
1112 return ph->set_priv(ph, cinfo, version); in scmi_clock_protocol_init()
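The init fragments above install either the v1 or the v2 config handlers after reading the protocol version. A hypothetical reconstruction of that dispatch inside scmi_clock_protocol_init(); the PROTOCOL_REV_MAJOR() check and the ">= 0x3" cut-off are assumptions, not something this listing shows:

/* Hypothetical version gate around the handler assignments above:
 * newer clock protocol revisions gain the extended CONFIG_SET/GET
 * commands, older ones keep the original pair. The major-version
 * threshold used here is assumed, not taken from this listing.
 */
if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
	cinfo->clock_config_set = scmi_clock_config_set_v2;
	cinfo->clock_config_get = scmi_clock_config_get_v2;
} else {
	cinfo->clock_config_set = scmi_clock_config_set;
	cinfo->clock_config_get = scmi_clock_config_get;
}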