Lines Matching +full:mhu +full:- +full:tx

// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */

#include <linux/io-64-nonatomic-hi-lo.h>

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @free_xfers: A free list of available xfers; it is initialized with
 *              a number of xfers equal to the maximum allowed in-flight
 *              messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *                 currently in-flight messages.
 */

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 *
 * @gid: A reference for per-protocol devres management.
 * @negotiated_version: ...
 *                      This field is NON-zero when a successful negotiation
 *                      has completed.
 */

/**
 * struct scmi_debug_info - Debug common info
 */

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @version: SCMI revision information containing protocol version,
 *           implementation version and (sub-)vendor identification.
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 */
/* in scmi_protocol_get() */
    ...
            version->vendor_id,
            version->sub_vendor_id,
            version->impl_ver);
    if (!proto || !try_module_get(proto->owner)) {
        ...
    }
    ...
    pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
            protocol_id, proto->vendor_id ?: "",
            proto->sub_vendor_id ?: "", proto->impl_ver);

/* in scmi_protocol_put() */
    module_put(proto->owner);

/* in scmi_vendor_protocol_check() */
    if (!proto->vendor_id) {
        pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
        return -EINVAL;
    }

    if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
        pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
        return -EINVAL;
    }

    if (proto->sub_vendor_id &&
        strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
        pr_err("malformed sub_vendor_id for protocol 0x%x\n",
               proto->id);
        return -EINVAL;
    }

/* in scmi_protocol_register() */
    if (!proto) {
        ...
        return -EINVAL;
    }

    if (!proto->instance_init) {
        pr_err("missing init for protocol 0x%x\n", proto->id);
        return -EINVAL;
    }

    if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
        scmi_vendor_protocol_check(proto))
        return -EINVAL;
    ...
    key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
                                      proto->sub_vendor_id,
                                      proto->impl_ver);
    if (!key)
        return -EINVAL;
    ...
        pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
               proto->id, ret);
    ...
    pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);

/* in scmi_protocol_unregister() */
    key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
                                      proto->sub_vendor_id,
                                      proto->impl_ver);
    ...
    pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
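/*
 * For illustration only -- not part of this file: a minimal vendor protocol
 * definition shaped to satisfy the checks above (non-NULL instance_init,
 * vendor_id present and shorter than SCMI_SHORT_NAME_MAX_SIZE, id in the
 * vendor range). All names and values below are hypothetical.
 */
static int my_vendor_proto_init(const struct scmi_protocol_handle *ph)
{
    /* query PROTOCOL_VERSION / PROTOCOL_ATTRIBUTES here */
    return 0;
}

static const struct scmi_protocol my_vendor_proto = {
    .id = 0x81,                             /* >= SCMI_PROTOCOL_VENDOR_BASE */
    .owner = THIS_MODULE,
    .instance_init = &my_vendor_proto_init, /* mandatory, checked above */
    .vendor_id = "MyVendor",                /* mandatory in the vendor range */
    .sub_vendor_id = "MySoC",               /* optional */
    .impl_ver = 0x10000,
};

/*
 * Registration/unregistration would then go through scmi_protocol_register()
 * and scmi_protocol_unregister() as above.
 */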
/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 */
    mutex_lock(&info->devreq_mtx);
    sdev = scmi_device_create(np, info->dev, prot_id, name);
    if (!sdev)
        dev_err(info->dev,
                "failed to create device for protocol 0x%X (%s)\n",
                prot_id, name);
    mutex_unlock(&info->devreq_mtx);

/* in scmi_destroy_protocol_devices() */
    mutex_lock(&info->devreq_mtx);
    scmi_device_destroy(info->dev, prot_id, name);
    mutex_unlock(&info->devreq_mtx);

/* in scmi_notification_instance_data_set() */
    info->notify_priv = priv;

/* in scmi_notification_instance_data_get() */
    return info->notify_priv;

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Pick a new monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order, we
 * should account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may be NOT available, so pick xfer_id >= next_token
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight, but
 *    free tokens are still available at the start of the table
 *
 * (The original comment illustrates these cases with ASCII diagrams --
 * "no tokens pending in-flight", "Out-of-order pending at start" and
 * "Out-of-order pending at end" -- where X marks a token used in-flight,
 * xfer_id is picked at the first free slot at or after next_token, and
 * last_token is fixed up whenever holes are skipped.)
 */
    /*
     * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
     * using the pre-allocated transfer_id as a base.
     */
    next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
    ...
    xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                 MSG_TOKEN_MAX, next_token);
    if (xfer_id == MSG_TOKEN_MAX) {
        /*
         * After heavily out-of-order responses, there are no free
         * tokens ahead, but only at the start of xfer_alloc_table.
         */
        xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                     MSG_TOKEN_MAX, 0);
        /*
         * Something is wrong if we got here since there can be a
         * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
         * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
         */
        if (xfer_id == MSG_TOKEN_MAX)
            return -ENOMEM;
    }

    /* Update +/- last_token accordingly if we skipped some hole */
    if (xfer_id != next_token)
        atomic_add((int)(xfer_id - next_token), &transfer_last_id);

    xfer->hdr.seq = (u16)xfer_id;
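/*
 * The two-pass wrap-around search above, restated as a standalone sketch
 * (hypothetical helper, for illustration only): try [next_token,
 * MSG_TOKEN_MAX) first, then wrap to [0, next_token).
 */
static int pick_free_token(unsigned long *xfer_alloc_table,
                           unsigned int next_token)
{
    unsigned int xfer_id;

    /* Pass 1: first free slot at or after next_token */
    xfer_id = find_next_zero_bit(xfer_alloc_table, MSG_TOKEN_MAX,
                                 next_token);
    if (xfer_id == MSG_TOKEN_MAX)
        /* Pass 2: wrap around and retry from the start of the table */
        xfer_id = find_next_zero_bit(xfer_alloc_table, MSG_TOKEN_MAX, 0);
    if (xfer_id == MSG_TOKEN_MAX)
        return -ENOMEM;    /* every token is currently in-flight */

    return xfer_id;
}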
/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 */
    clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);

/**
 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * was built using a sequence number which still corresponds to a free slot.
 */
    /* Set in-flight */
    set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
    hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
    xfer->pending = true;

/**
 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper fails if an xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
 *         is already in use.
 */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
        scmi_xfer_inflight_register_unlocked(xfer, minfo);
    else
        ret = -EBUSY;
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
 * in-flight on the TX channel, if possible.
 */
    return scmi_xfer_inflight_register(xfer, &info->tx_minfo);

/**
 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    /* Set a new monotonic token as the xfer sequence number */
    ret = scmi_xfer_token_set(minfo, xfer);
    if (!ret)
        scmi_xfer_inflight_register_unlocked(xfer, minfo);
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    if (hlist_empty(&minfo->free_xfers)) {
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
        return ERR_PTR(-ENOMEM);
    }

    /* grab an xfer from the free slots */
    xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
    hlist_del_init(&xfer->node);
    ...
    xfer->transfer_id = atomic_inc_return(&transfer_last_id);

    refcount_set(&xfer->users, 1);
    atomic_set(&xfer->busy, SCMI_XFER_FREE);
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

/**
 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
 *
 * Note that xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
    xfer = scmi_xfer_get(handle, &info->tx_minfo);
    if (!IS_ERR(xfer))
        xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

/**
 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
 *
 * Note that here, instead of a dedicated DT-defined channel, any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 */
    cinfo = idr_find(&info->tx_idr, protocol_id);
    if (!cinfo) {
        if (protocol_id == SCMI_PROTOCOL_BASE)
            return ERR_PTR(-EINVAL);
        /* Use Base channel for protocols not having their own */
        cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
        if (!cinfo)
            return ERR_PTR(-EINVAL);
        dev_warn_once(handle->dev,
                      "Using Base channel for protocol 0x%X\n",
                      protocol_id);
    }

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    if (refcount_dec_and_test(&xfer->users)) {
        if (xfer->pending) {
            scmi_xfer_token_clear(minfo, xfer);
            hash_del(&xfer->node);
            xfer->pending = false;
        }
        hlist_add_head(&xfer->node, &minfo->free_xfers);
    }
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

/**
 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
 */
    xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
    xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
    return __scmi_xfer_put(&info->tx_minfo, xfer);

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 */
    if (test_bit(xfer_id, minfo->xfer_alloc_table))
        xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

    return xfer ?: ERR_PTR(-EINVAL);
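/*
 * XFER_FIND() itself is not shown in this excerpt; given that in-flight
 * xfers are hash_add()-ed keyed on hdr.seq (see
 * scmi_xfer_inflight_register_unlocked() above), a plausible implementation
 * -- an assumption, not verbatim -- is:
 */
#define XFER_FIND(__ht, __k)                                \
({                                                          \
    typeof(__k) k_ = __k;                                   \
    struct scmi_xfer *xfer_ = NULL;                         \
                                                            \
    hash_for_each_possible((__ht), xfer_, node, k_)         \
        if (xfer_->hdr.seq == k_)                           \
            break;                                          \
    xfer_;                                                  \
})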
/**
 * scmi_bad_message_trace - A helper to trace weird messages
 *
 * A helper to trace unexpected messages, e.g. a late reply for an already
 * timed-out message that arrives and, as such, can be traced only referring
 * to the message header.
 */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    ...
    trace_scmi_msg_dump(info->id, cinfo->id,
                        ...);

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * If an asynchronous delayed response is received before its
 * related synchronous response (Out-of-Order Delayed Response), the missing
 * synchronous response is assumed to have been lost and is completed as OK,
 * since the underlying SCMI transport can deliver such out-of-order
 * responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 */
    /*
     * Even if a response was indeed expected on this slot at this point,
     * a buggy platform could wrongly reply feeding us an unexpected
     * delayed response we're not prepared to handle: bail-out safely
     * blaming firmware.
     */
    if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
        dev_err(cinfo->dev,
                "Delayed Response for %d not expected! Buggy F/W ?\n",
                xfer->hdr.seq);
        return -EINVAL;
    }

    switch (xfer->state) {
    case SCMI_XFER_SENT_OK:
        if (msg_type == MSG_TYPE_DELAYED_RESP) {
            /* Sync response was lost: assume it was OK. */
            xfer->hdr.status = SCMI_SUCCESS;
            xfer->state = SCMI_XFER_RESP_OK;
            complete(&xfer->done);
            dev_warn(cinfo->dev,
                     "Received valid OoO Delayed Response for %d\n",
                     xfer->hdr.seq);
        }
        break;
    case SCMI_XFER_RESP_OK:
        if (msg_type != MSG_TYPE_DELAYED_RESP)
            return -EINVAL;
        break;
    case SCMI_XFER_DRESP_OK:
    default:
        return -EINVAL;
    }

/**
 * scmi_xfer_state_update - Update xfer state
 */
    xfer->hdr.type = msg_type;

    /* Unknown command types were already discarded earlier */
    if (xfer->hdr.type == MSG_TYPE_COMMAND)
        xfer->state = SCMI_XFER_RESP_OK;
    else
        xfer->state = SCMI_XFER_DRESP_OK;

/* in scmi_xfer_acquired() */
    ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    struct scmi_xfers_info *minfo = &info->tx_minfo;
    ...
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
    if (IS_ERR(xfer)) {
        dev_err(cinfo->dev,
                ...);
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
        ...
        scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);

        return xfer;
    }
    refcount_inc(&xfer->users);
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

    spin_lock_irqsave(&xfer->lock, flags);
    ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
    ...
    spin_unlock_irqrestore(&xfer->lock, flags);

    if (ret) {
        dev_err(cinfo->dev,
                "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
                msg_type, xfer_id, msg_hdr, xfer->state);
        ...
        scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
        ...
        xfer = ERR_PTR(-EINVAL);
    }

/* in scmi_xfer_command_release() */
    atomic_set(&xfer->busy, SCMI_XFER_FREE);
    __scmi_xfer_put(&info->tx_minfo, xfer);

/* in scmi_clear_channel() */
    if (!cinfo->is_p2a) {
        dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
        return;
    }

    if (info->desc->ops->clear_channel)
        info->desc->ops->clear_channel(cinfo);

/* in scmi_handle_notification() */
    struct device *dev = cinfo->dev;
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    struct scmi_xfers_info *minfo = &info->rx_minfo;
    ...
    xfer = scmi_xfer_get(cinfo->handle, minfo);
    if (IS_ERR(xfer)) {
        ...
        scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
        ...
        return;
    }

    unpack_scmi_header(msg_hdr, &xfer->hdr);
    if (priv)
        /* Ensure order between xfer->priv store and following ops */
        smp_store_mb(xfer->priv, priv);
    info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
                                        xfer);

    trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
                        xfer->hdr.id, "NOTI", xfer->hdr.seq,
                        xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
    scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);

    scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
                xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

    trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                       xfer->hdr.protocol_id, xfer->hdr.seq,
                       MSG_TYPE_NOTIFICATION);

    if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
        xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
        scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
                                cinfo->id);
    }
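/*
 * For reference: the 32-bit SCMI message header decoded by
 * unpack_scmi_header() and MSG_XTRACT_TOKEN() above is laid out, per the
 * SCMI specification, as below; the mask definitions are assumed here to
 * match their usage in this file.
 *
 *     | 31..28 | 27..18 | 17..10      | 9..8 | 7..0       |
 *     | rsvd   | token  | protocol_id | type | message_id |
 */
#define MSG_ID_MASK             GENMASK(7, 0)
#define MSG_TYPE_MASK           GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK    GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK       GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)   FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))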
/* in scmi_handle_response() */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    ...
    xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
    if (IS_ERR(xfer)) {
        if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
            scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
        ...
        return;
    }

    /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
    if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
        xfer->rx.len = info->desc->max_msg_size;

    if (priv)
        /* Ensure order between xfer->priv store and following ops */
        smp_store_mb(xfer->priv, priv);
    info->desc->ops->fetch_response(cinfo, xfer);

    trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
                        xfer->hdr.id,
                        xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
                        "DLYD" : "RESP",
                        xfer->hdr.seq, xfer->hdr.status,
                        xfer->rx.buf, xfer->rx.len);

    trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                       xfer->hdr.protocol_id, xfer->hdr.seq,
                       xfer->hdr.type);

    if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
        ...
        complete(xfer->async_done);
        scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
    } else {
        complete(&xfer->done);
        scmi_inc_count(info->dbg->counters, RESPONSE_OK);
    }

    if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
        /*
         * When in polling mode avoid to queue the Raw xfer on the IRQ
         * RX path since it will be already queued at the end of the TX
         * poll loop.
         */
        if (!xfer->hdr.poll_completion)
            scmi_raw_message_report(info->raw, xfer,
                                    SCMI_RAW_REPLY_QUEUE,
                                    cinfo->id);
    }

/**
 * scmi_rx_callback() - callback for receiving messages
 */

/**
 * xfer_put() - Release a transmit message
 */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);

    __scmi_xfer_put(&info->tx_minfo, xfer);

/* in scmi_xfer_done_no_timeout() */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

    /*
     * Poll also on xfer->done so that polling can be forcibly terminated
     * in case of out-of-order receptions of delayed responses
     */
    return info->desc->ops->poll_done(cinfo, xfer) ||
           try_wait_for_completion(&xfer->done) ||
           ktime_after(ktime_get(), stop);

/* in scmi_wait_for_reply() */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

    if (xfer->hdr.poll_completion) {
        /*
         * Real polling is needed only if transport has NOT declared
         * itself to support synchronous commands replies.
         */
        if (!desc->sync_cmds_completed_on_ret) {
            ...
        }

        if (...) {    /* timed out while polling */
            dev_err(dev,
                    "timed out in resp(caller: %pS) - polling\n",
                    (void *)_RET_IP_);
            ret = -ETIMEDOUT;
            scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
        } else {
            ...
            /*
             * Do not fetch_response if an out-of-order delayed
             * response is being processed.
             */
            spin_lock_irqsave(&xfer->lock, flags);
            if (xfer->state == SCMI_XFER_SENT_OK) {
                desc->ops->fetch_response(cinfo, xfer);
                xfer->state = SCMI_XFER_RESP_OK;
            }
            spin_unlock_irqrestore(&xfer->lock, flags);

            trace_scmi_msg_dump(info->id, cinfo->id,
                                xfer->hdr.protocol_id, xfer->hdr.id,
                                ...,
                                xfer->hdr.seq, xfer->hdr.status,
                                xfer->rx.buf, xfer->rx.len);
            scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);

            if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
                scmi_raw_message_report(info->raw, xfer,
                                        SCMI_RAW_REPLY_QUEUE,
                                        cinfo->id);
            }
        }
    } else {
        /* And we wait for the response. */
        if (!wait_for_completion_timeout(&xfer->done,
                                         msecs_to_jiffies(timeout_ms))) {
            dev_err(dev, "timed out in resp(caller: %pS)\n",
                    (void *)_RET_IP_);
            ret = -ETIMEDOUT;
            scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
        }
    }
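/*
 * The polling loop elided above presumably busy-waits on
 * scmi_xfer_done_no_timeout(); a sketch of that pattern, assuming a
 * ktime-based deadline and the three-argument form of the predicate
 * (illustrative, not verbatim):
 */
    ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

    spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
    if (ktime_after(ktime_get(), stop))
        ret = -ETIMEDOUT;    /* deadline passed without poll_done() */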
/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    struct device *dev = info->dev;

    trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
                                  xfer->hdr.protocol_id, xfer->hdr.seq,
                                  info->desc->max_rx_timeout_ms,
                                  xfer->hdr.poll_completion);

    return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
                               info->desc->max_rx_timeout_ms);

/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * response using an explicit timeout.
 */
    struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
    struct device *dev = info->dev;

    ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
    if (ret)
        dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
                pack_scmi_header(&xfer->hdr));

/**
 * do_xfer() - Do one transfer
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *         return corresponding error, else if all goes well, return 0.
 */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);
    struct device *dev = info->dev;
    ...
    /* Check for polling request on custom command xfers at first */
    if (xfer->hdr.poll_completion &&
        !is_transport_polling_capable(info->desc)) {
        dev_warn_once(dev,
                      "Polling mode is not supported by transport.\n");
        scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
        return -EINVAL;
    }

    cinfo = idr_find(&info->tx_idr, pi->proto->id);
    if (unlikely(!cinfo)) {
        scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
        return -EINVAL;
    }

    /* True ONLY if also supported by transport. */
    if (is_polling_enabled(cinfo, info->desc))
        xfer->hdr.poll_completion = true;

    /*
     * Initialise protocol id now from protocol handle to avoid it being
     * overridden by mistake (or malice) by the protocol code.
     */
    xfer->hdr.protocol_id = pi->proto->id;
    reinit_completion(&xfer->done);

    trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
                          xfer->hdr.protocol_id, xfer->hdr.seq,
                          xfer->hdr.poll_completion);

    /* Clear any stale status */
    xfer->hdr.status = SCMI_SUCCESS;
    xfer->state = SCMI_XFER_SENT_OK;
    /*
     * Even though spinlocking is not needed here since no race is possible
     * on xfer->state due to the monotonically increasing tokens allocation,
     * we must anyway ensure xfer->state initialization is not re-ordered
     * after the .send_message() to be sure that on the RX path an early
     * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
     */
    smp_mb();

    ret = info->desc->ops->send_message(cinfo, xfer);
    if (ret < 0) {
        ...
        scmi_inc_count(info->dbg->counters, SENT_FAIL);
        return ret;
    }

    trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
                        xfer->hdr.id, "CMND", xfer->hdr.seq,
                        xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
    scmi_inc_count(info->dbg->counters, SENT_OK);

    ret = scmi_wait_for_message_response(cinfo, xfer);
    if (!ret && xfer->hdr.status) {
        ret = scmi_to_linux_errno(xfer->hdr.status);
        scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
    }

    if (info->desc->ops->mark_txdone)
        info->desc->ops->mark_txdone(cinfo, ret, xfer);

    trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
                        xfer->hdr.protocol_id, xfer->hdr.seq, ret);

/* in reset_rx_to_maxsz() */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);

    xfer->rx.len = info->desc->max_msg_size;

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 * response is received
 *
 * Note that polling while waiting for a delayed response
 * could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction.
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *         return corresponding error, else if all goes well, return 0.
 */
    xfer->async_done = &async_response;
    ...
    WARN_ON_ONCE(xfer->hdr.poll_completion);

    ret = do_xfer(ph, xfer);
    if (!ret) {
        if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
            dev_err(ph->dev,
                    "timed out in delayed resp(caller: %pS)\n",
                    (void *)_RET_IP_);
            ret = -ETIMEDOUT;
        } else if (xfer->hdr.status) {
            ret = scmi_to_linux_errno(xfer->hdr.status);
        }
    }

    xfer->async_done = NULL;
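/*
 * A hypothetical protocol-side caller of the asynchronous flow above;
 * MY_ASYNC_MSG_ID and the payload layout are made up for illustration.
 */
    struct scmi_xfer *t;
    int ret;

    ret = ph->xops->xfer_get_init(ph, MY_ASYNC_MSG_ID, sizeof(u32), 0, &t);
    if (ret)
        return ret;

    /* ... fill t->tx.buf with the command payload ... */

    ret = ph->xops->do_xfer_with_response(ph, t);  /* waits for delayed resp */
    if (!ret) {
        /* the delayed-response payload is now in t->rx.buf */
    }

    ph->xops->xfer_put(ph, t);
    return ret;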
/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);
    struct scmi_xfers_info *minfo = &info->tx_minfo;
    struct device *dev = info->dev;

    /* Ensure we have sane transfer sizes */
    if (rx_size > info->desc->max_msg_size ||
        tx_size > info->desc->max_msg_size)
        return -ERANGE;

    xfer = scmi_xfer_get(pi->handle, minfo);
    if (IS_ERR(xfer)) {
        ...
    }

    /* Pick a sequence number and register this xfer as in-flight */
    ret = scmi_xfer_pending_set(xfer, minfo);
    if (ret) {
        dev_err(pi->handle->dev,
                "Failed to get monotonic token %d\n", ret);
        ...
    }

    xfer->tx.len = tx_size;
    xfer->rx.len = rx_size ? : info->desc->max_msg_size;
    xfer->hdr.type = MSG_TYPE_COMMAND;
    xfer->hdr.id = msg_id;
    xfer->hdr.poll_completion = false;

/**
 * version_get() - command to get the revision of the SCMI entity
 */
    rev_info = t->rx.buf;

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 */
    pi->priv = priv;
    pi->version = version;

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 */
    return pi->priv;

/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 */
    ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
    if (ret)
        goto out;

    put_unaligned_le32(res_id, t->tx.buf);
    if (flags)
        put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
    resp = t->rx.buf;

    ret = ph->xops->do_xfer(ph, t);
    if (!ret)
        strscpy(name, resp->name, len);

    ph->xops->xfer_put(ph, t);
out:
    if (ret)
        dev_warn(ph->dev,
                 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
                 res_id, ret, name);

/**
 * scmi_common_get_max_msg_size - Get maximum message size
 */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);

    return info->desc->max_msg_size;

/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *       a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *        @process_response to parse the multi-part replies.
 *
 * This iterator is meant to be used only by the
 * internal routines and by the caller-provided @scmi_iterator_ops.
 */

/* in scmi_iterator_init() */
    i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
    if (!i)
        return ERR_PTR(-ENOMEM);

    i->ph = ph;
    i->ops = ops;
    i->priv = priv;

    ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
    if (ret) {
        devm_kfree(ph->dev, i);
        return ERR_PTR(ret);
    }

    i->state.max_resources = max_resources;
    i->msg = i->t->tx.buf;
    i->resp = i->t->rx.buf;

/* in scmi_iterator_run() */
    int ret = -EINVAL;
    ...
    if (!i || !i->ops || !i->ph)
        return ret;

    iops = i->ops;
    ph = i->ph;
    st = &i->state;

    do {
        iops->prepare_message(i->msg, st->desc_index, i->priv);
        ret = ph->xops->do_xfer(ph, i->t);
        if (ret)
            break;

        st->rx_len = i->t->rx.len;
        ret = iops->update_state(st, i->resp, i->priv);
        if (ret)
            break;

        if (st->num_returned > st->max_resources - st->desc_index) {
            dev_err(ph->dev,
                    "No. of resources can't exceed %d\n",
                    st->max_resources);
            ret = -EINVAL;
            break;
        }

        for (st->loop_idx = 0; st->loop_idx < st->num_returned;
             st->loop_idx++) {
            ret = iops->process_response(ph, i->resp, st, i->priv);
            if (ret)
                goto out;
        }

        st->desc_index += st->num_returned;
        ph->xops->reset_rx_to_maxsz(ph, i->t);
        /*
         * check for both returned and remaining to avoid infinite
         * loops due to buggy firmware
         */
    } while (st->num_returned && st->num_remaining);

out:
    /* Ignore return value while releasing resources */
    ph->xops->xfer_put(ph, i->t);
    devm_kfree(ph->dev, i);
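/*
 * A hypothetical use of the iterator above for a multi-part "describe"
 * command: the callback shapes are inferred from the calls inside
 * scmi_iterator_run() and the message/response layouts are made up.
 */
static void my_prepare_message(void *message, unsigned int desc_index,
                               const void *priv)
{
    __le32 *msg = message;

    *msg = cpu_to_le32(desc_index);    /* ask for resources from here on */
}

static int my_update_state(struct scmi_iterator_state *st,
                           const void *response, void *priv)
{
    const __le32 *resp = response;
    u32 flags = le32_to_cpu(*resp);

    /* split returned/remaining counts out of the first reply word */
    st->num_returned = flags & 0xfff;
    st->num_remaining = flags >> 16;

    return 0;
}

static int my_process_response(const struct scmi_protocol_handle *ph,
                               const void *response,
                               struct scmi_iterator_state *st, void *priv)
{
    const __le32 *resp = response;
    u32 *ids = priv;    /* caller-provided output array */

    ids[st->desc_index + st->loop_idx] =
        le32_to_cpu(resp[1 + st->loop_idx]);

    return 0;
}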
/* in scmi_common_fastchannel_init() */
        ret = -EINVAL;
    ...
    ret = ph->xops->xfer_get_init(ph, describe_id,
                                  sizeof(*info), sizeof(*resp), &t);
    if (ret)
        goto err_out;

    info = t->tx.buf;
    info->domain = cpu_to_le32(domain);
    info->message_id = cpu_to_le32(message_id);
    ...
    ret = ph->xops->do_xfer(ph, t);
    if (ret)
        goto err_xfer;

    resp = t->rx.buf;
    flags = le32_to_cpu(resp->attr);
    size = le32_to_cpu(resp->chan_size);
    if (...) {    /* invalid channel size */
        ret = -EINVAL;
        goto err_xfer;
    }

    if (rate_limit)
        *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);

    phys_addr = le32_to_cpu(resp->chan_addr_low);
    phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
    addr = devm_ioremap(ph->dev, phys_addr, size);
    if (!addr) {
        ret = -EADDRNOTAVAIL;
        goto err_xfer;
    }
    ...
    if (...) {    /* an optional doorbell is described */
        db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
        if (!db) {
            ret = -ENOMEM;
            goto err_db;
        }
        ...
        phys_addr = le32_to_cpu(resp->db_addr_low);
        phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
        addr = devm_ioremap(ph->dev, phys_addr, size);
        if (!addr) {
            ret = -EADDRNOTAVAIL;
            goto err_db_mem;
        }

        db->addr = addr;
        db->width = size;
        db->set = le32_to_cpu(resp->db_set_lmask);
        db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
        db->mask = le32_to_cpu(resp->db_preserve_lmask);
        db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
        ...
    }
    ...
    ph->xops->xfer_put(ph, t);

    dev_dbg(ph->dev,
            "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
            pi->proto->id, message_id, domain);

    return;

err_db_mem:
    devm_kfree(ph->dev, db);

err_db:
    ...
err_xfer:
    ph->xops->xfer_put(ph, t);

err_out:
    dev_warn(ph->dev,
             "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
             pi->proto->id, message_id, domain, ret);

#define SCMI_PROTO_FC_RING_DB(w)                        \
do {                                                    \
    u##w val = 0;                                       \
                                                        \
    if (db->mask)                                       \
        val = ioread##w(db->addr) & db->mask;           \
    iowrite##w((u##w)db->set | val, db->addr);          \
} while (0)

/* in scmi_common_fastchannel_db_ring() */
    if (!db || !db->addr)
        return;

    if (db->width == 1)
        SCMI_PROTO_FC_RING_DB(8);
    else if (db->width == 2)
        SCMI_PROTO_FC_RING_DB(16);
    else if (db->width == 4)
        SCMI_PROTO_FC_RING_DB(32);
    else /* db->width == 8 */
#ifdef CONFIG_64BIT
        SCMI_PROTO_FC_RING_DB(64);
#else
    {
        u64 val = 0;

        if (db->mask)
            val = ioread64_hi_lo(db->addr) & db->mask;
        iowrite64_hi_lo(db->set | val, db->addr);
    }
#endif
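/*
 * What the db->width == 4 case above expands to, roughly: a read-modify-write
 * that preserves the bits selected by db->mask and rings the doorbell by
 * writing the bits in db->set.
 */
    {
        u32 val = 0;

        if (db->mask)
            val = ioread32(db->addr) & db->mask;  /* keep preserved bits */
        iowrite32((u32)db->set | val, db->addr);  /* ring the doorbell */
    }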
/**
 * scmi_protocol_msg_check - Check protocol message attributes
 */
    put_unaligned_le32(message_id, t->tx.buf);
    ...
    if (!ret && attributes)
        *attributes = get_unaligned_le32(t->rx.buf);

/**
 * scmi_revision_area_get - Retrieve version memory area.
 */
    return pi->handle->version;

/**
 * scmi_protocol_version_negotiate - Negotiate protocol version
 */
    put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
    ret = ph->xops->do_xfer(ph, t);
    if (!ret)
        pi->negotiated_version = pi->proto->supported_version;

/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 * instance
 *
 * Note that all resources management is handled via a dedicated per-protocol
 * devres group.
 */
    int ret = -ENOMEM;
    void *gid;
    const struct scmi_handle *handle = &info->handle;

    /* Protocol specific devres group */
    gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
    if (!gid) {
        ...
    }

    pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
    if (!pi)
        goto clean;

    pi->gid = gid;
    pi->proto = proto;
    pi->handle = handle;
    pi->ph.dev = handle->dev;
    pi->ph.xops = &xfer_ops;
    pi->ph.hops = &helpers_ops;
    pi->ph.set_priv = scmi_set_protocol_priv;
    pi->ph.get_priv = scmi_get_protocol_priv;
    refcount_set(&pi->users, 1);
    /* proto->init is assured NON NULL by scmi_protocol_register */
    ret = pi->proto->instance_init(&pi->ph);
    if (ret)
        goto clean;

    ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
                    GFP_KERNEL);
    if (ret != proto->id)
        goto clean;

    if (pi->proto->events) {
        ret = scmi_register_protocol_events(handle, pi->proto->id,
                                            &pi->ph,
                                            pi->proto->events);
        if (ret)
            dev_warn(handle->dev,
                     "Protocol:%X - Events Registration Failed - err:%d\n",
                     pi->proto->id, ret);
    }

    devres_close_group(handle->dev, pi->gid);
    dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

    if (pi->version > proto->supported_version) {
        ret = scmi_protocol_version_negotiate(&pi->ph);
        if (!ret)
            dev_info(handle->dev,
                     ...,
                     proto->id, pi->negotiated_version);
        else {
            dev_warn(handle->dev,
                     ...,
                     pi->version, pi->proto->id);
            dev_warn(handle->dev,
                     ...,
                     pi->proto->supported_version);
        }
    }

    return pi;

clean:
    ...
    devres_release_group(handle->dev, gid);
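/*
 * The per-protocol devres grouping used above, reduced to its skeleton
 * (illustrative only): everything devm-allocated between open and close can
 * later be freed in one shot by releasing the group on last user.
 */
    gid = devres_open_group(dev, NULL, GFP_KERNEL);
    if (!gid)
        return ERR_PTR(-ENOMEM);

    pi = devm_kzalloc(dev, sizeof(*pi), GFP_KERNEL);  /* tracked in gid */
    if (!pi) {
        devres_release_group(dev, gid);  /* failure: free the whole group */
        return ERR_PTR(-ENOMEM);
    }

    devres_close_group(dev, gid);  /* success: keep, release on last put */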
/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 *
 * In case the protocol is not already initialized, initialize it, handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 * in particular returns -EPROBE_DEFER when the desired protocol could
 * NOT be found.
 */
    mutex_lock(&info->protocols_mtx);
    pi = idr_find(&info->protocols, protocol_id);

    if (pi) {
        refcount_inc(&pi->users);
    } else {
        const struct scmi_protocol *proto;

        proto = scmi_protocol_get(protocol_id, &info->version);
        if (proto)
            pi = scmi_alloc_init_protocol_instance(info, proto);
        else
            pi = ERR_PTR(-EPROBE_DEFER);
    }
    mutex_unlock(&info->protocols_mtx);

/**
 * scmi_protocol_acquire - Protocol acquire
 */

/**
 * scmi_protocol_release - Protocol de-initialization helper.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resources de-allocation once the last user has gone.
 */
    mutex_lock(&info->protocols_mtx);
    pi = idr_find(&info->protocols, protocol_id);
    if (!pi)
        goto out;

    if (refcount_dec_and_test(&pi->users)) {
        void *gid = pi->gid;

        if (pi->proto->events)
            scmi_deregister_protocol_events(handle, protocol_id);

        if (pi->proto->instance_deinit)
            pi->proto->instance_deinit(&pi->ph);

        idr_remove(&info->protocols, protocol_id);

        scmi_protocol_put(pi->proto);

        devres_release_group(handle->dev, gid);
        dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
                protocol_id);
    }

out:
    mutex_unlock(&info->protocols_mtx);

/* in scmi_setup_protocol_implemented() */
    struct scmi_info *info = handle_to_scmi_info(pi->handle);

    info->protocols_imp = prot_imp;

/* in scmi_is_protocol_implemented() */
    struct scmi_revision_info *rev = handle->version;

    if (!info->protocols_imp)
        return false;

    for (i = 0; i < rev->num_protocols; i++)
        if (info->protocols_imp[i] == prot_id)
            return true;

    return false;

/* in scmi_devm_release_protocol() */
    scmi_protocol_release(dres->handle, dres->protocol_id);

/* in scmi_devres_protocol_instance_get() */
    dres = devres_alloc(scmi_devm_release_protocol,
                        sizeof(*dres), GFP_KERNEL);
    if (!dres)
        return ERR_PTR(-ENOMEM);

    pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
    if (IS_ERR(pi)) {
        devres_free(dres);
        return pi;
    }

    dres->handle = sdev->handle;
    dres->protocol_id = protocol_id;
    devres_add(&sdev->dev, dres);

/**
 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
 *
 * Being a devres managed operation, the protocol is automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 */
    if (!ph)
        return ERR_PTR(-EINVAL);

    pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
    if (IS_ERR(pi))
        return pi;

    *ph = &pi->ph;

    return pi->proto->ops;
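/*
 * A hypothetical SCMI client driver probe using the devres-managed getter
 * above; the Clock protocol is just an example, any protocol id works the
 * same way.
 */
static int my_drv_probe(struct scmi_device *sdev)
{
    struct scmi_protocol_handle *ph;
    const struct scmi_clk_proto_ops *clk_ops;

    clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
                                              &ph);
    if (IS_ERR(clk_ops))
        return PTR_ERR(clk_ops);    /* may be -EPROBE_DEFER */

    /* use clk_ops with ph; auto-released when this driver unbinds */
    return 0;
}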
/**
 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
 *
 * Being a devres managed operation, the protocol is automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 */

/* in scmi_devm_protocol_match() */
    return dres->protocol_id == *((u8 *)data);

/**
 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
 */
    ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
                         scmi_devm_protocol_match, &protocol_id);

/**
 * scmi_is_transport_atomic - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 */
    ret = info->desc->atomic_enabled &&
          is_transport_polling_capable(info->desc);
    if (ret && atomic_threshold)
        *atomic_threshold = info->desc->atomic_threshold;

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 */
    list_for_each(p, &scmi_list) {
        info = list_entry(p, struct scmi_info, node);
        if (dev->parent == info->dev) {
            info->users++;
            handle = &info->handle;
            break;
        }
    }

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * Return: 0 is successfully released;
 *         if null was passed, it returns -EINVAL;
 */
    if (!handle)
        return -EINVAL;
    ...
    if (!WARN_ON(!info->users))
        info->users--;

/* in scmi_set_handle() */
    scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
    if (scmi_dev->handle)
        scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);

/* in __scmi_xfer_info_init() */
    struct device *dev = sinfo->dev;
    const struct scmi_desc *desc = sinfo->desc;

    /* Pre-allocated messages, no more than what hdr.seq can support */
    if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
        dev_err(dev,
                "Invalid maximum messages %d, not in range [1 - %lu]\n",
                info->max_msg, MSG_TOKEN_MAX);
        return -EINVAL;
    }

    hash_init(info->pending_xfers);

    /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
    info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
                                                GFP_KERNEL);
    if (!info->xfer_alloc_table)
        return -ENOMEM;

    /*
     * Preallocate a number of xfers equal to max inflight messages,
     * pre-initialize the buffer pointer to pre-allocated buffers and
     * attach all of them to the free list
     */
    INIT_HLIST_HEAD(&info->free_xfers);
    for (i = 0; i < info->max_msg; i++) {
        xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
        if (!xfer)
            return -ENOMEM;

        xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
                                    GFP_KERNEL);
        if (!xfer->rx.buf)
            return -ENOMEM;

        xfer->tx.buf = xfer->rx.buf;
        init_completion(&xfer->done);
        spin_lock_init(&xfer->lock);

        /* Add initialized xfer to the free list */
        hlist_add_head(&xfer->node, &info->free_xfers);
    }

    spin_lock_init(&info->xfer_lock);

/* in scmi_channels_max_msg_configure() */
    const struct scmi_desc *desc = sinfo->desc;

    if (!desc->ops->get_max_msg) {
        sinfo->tx_minfo.max_msg = desc->max_msg;
        sinfo->rx_minfo.max_msg = desc->max_msg;
    } else {
        struct scmi_chan_info *base_cinfo;

        base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
        if (!base_cinfo)
            return -EINVAL;
        sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

        /* RX channel is optional so can be skipped */
        base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
        if (base_cinfo)
            sinfo->rx_minfo.max_msg =
                desc->ops->get_max_msg(base_cinfo);
    }

/* in scmi_xfer_info_init() */
    ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
    if (!ret && !idr_is_empty(&sinfo->rx_idr))
        ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
                           int prot_id, bool tx)
{
    ...
    idx = tx ? 0 : 1;
    idr = tx ? &info->tx_idr : &info->rx_idr;

    if (!info->desc->ops->chan_available(of_node, idx)) {
        ...
        return -EINVAL;
    }

    cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
    if (!cinfo)
        return -ENOMEM;

    cinfo->is_p2a = !tx;
    cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
    cinfo->max_msg_size = info->desc->max_msg_size;

    /* Create a unique name for this transport device */
    snprintf(name, 32, "__scmi_transport_device_%s_%02X",
             idx ? "rx" : "tx", prot_id);
    /* Create a uniquely named, dedicated transport device for this chan */
    tdev = scmi_device_create(of_node, info->dev, prot_id, name);
    if (!tdev) {
        dev_err(info->dev,
                "failed to create transport device (%s)\n", name);
        devm_kfree(info->dev, cinfo);
        return -EINVAL;
    }
    ...
    cinfo->id = prot_id;
    cinfo->dev = &tdev->dev;
    ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
    if (ret) {
        ...
        scmi_device_destroy(info->dev, prot_id, name);
        devm_kfree(info->dev, cinfo);
        return ret;
    }

    if (tx && is_polling_required(cinfo, info->desc)) {
        if (is_transport_polling_capable(info->desc))
            dev_info(&tdev->dev,
                     "Enabled polling mode TX channel - prot_id:%d\n",
                     prot_id);
        else
            dev_warn(&tdev->dev,
                     "Polling mode NOT supported by transport.\n");
    }
    ...
        dev_err(info->dev,
                ...);
        ...
        scmi_device_destroy(info->dev, prot_id, name);
        devm_kfree(info->dev, cinfo);
    ...
    cinfo->handle = &info->handle;
}

/* in scmi_txrx_setup() */
    ret = scmi_chan_setup(info, of_node, prot_id, true);
    if (!ret) {
        /* Rx is optional, report only memory errors */
        ret = scmi_chan_setup(info, of_node, prot_id, false);
        if (ret && ret != -ENOMEM)
            ret = 0;
    }

    if (ret)
        dev_err(info->dev,
                "failed to setup channel for protocol:0x%X\n", prot_id);

/**
 * scmi_channels_setup - Helper to initialize all required channels
 *
 * Note that, even though a pair of TX/RX channels is associated to each
 * protocol defined in the DT, a distinct freshly initialized channel is
 * created only if the DT node for the protocol at hand describes a dedicated
 * channel; otherwise the common BASE protocol channel is reused.
 */
    struct device_node *top_np = info->dev->of_node;
    ...
        dev_err(info->dev,
                ...);
    ...

/* in scmi_chan_destroy() */
    if (cinfo->dev) {
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_device *sdev = to_scmi_dev(cinfo->dev);

        of_node_put(cinfo->dev->of_node);
        scmi_device_destroy(info->dev, id, sdev->name);
        cinfo->dev = NULL;
    }

/* in scmi_cleanup_channels() */
    idr_for_each(idr, info->desc->ops->chan_free, idr);

/* in scmi_cleanup_txrx_channels() */
    scmi_cleanup_channels(info, &info->tx_idr);

    scmi_cleanup_channels(info, &info->rx_idr);
/* in scmi_bus_notifier() */
    /* Skip transport devices and devices of other SCMI instances */
    if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
        sdev->dev.parent != info->dev)
        return NOTIFY_DONE;

    switch (action) {
    ...
        scmi_handle_put(sdev->handle);
        sdev->handle = NULL;
        break;
    ...
    }

    dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
            sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
            "bound" : "unbound");

/* in scmi_device_request_notifier() */
    np = idr_find(&info->active_protocols, id_table->protocol_id);
    if (!np)
        return NOTIFY_DONE;

    dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
            action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
            id_table->name, id_table->protocol_id);

    switch (action) {
    case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
        scmi_create_protocol_devices(np, info, id_table->protocol_id,
                                     id_table->name);
        break;
    case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
        scmi_destroy_protocol_devices(info, id_table->protocol_id,
                                      id_table->name);
        break;
    ...
    }

/* in reset_all_on_write() */
    struct scmi_debug_info *dbg = filp->private_data;
    ...
        atomic_set(&dbg->counters[i], 0);

/* in scmi_debugfs_counters_setup() */
    ...
                    &dbg->counters[idx]);

/* in scmi_debugfs_common_cleanup() */
    debugfs_remove_recursive(dbg->top_dentry);
    kfree(dbg->name);
    kfree(dbg->type);

/* in scmi_debugfs_common_setup() */
    dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
    if (!dbg)
        return NULL;

    dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
    if (!dbg->name) {
        devm_kfree(info->dev, dbg);
        return NULL;
    }

    of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
    dbg->type = kstrdup(c_ptr, GFP_KERNEL);
    if (!dbg->type) {
        kfree(dbg->name);
        devm_kfree(info->dev, dbg);
        return NULL;
    }

    snprintf(top_dir, 16, "%d", info->id);
    ...
    dbg->is_atomic = info->desc->atomic_enabled &&
                     is_transport_polling_capable(info->desc);

    debugfs_create_str("instance_name", 0400, trans,
                       (char **)&dbg->name);

    debugfs_create_u32("atomic_threshold_us", 0400, trans,
                       (u32 *)&info->desc->atomic_threshold);

    debugfs_create_str("type", 0400, trans, (char **)&dbg->type);

    debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);

    debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
                       (u32 *)&info->desc->max_rx_timeout_ms);

    debugfs_create_u32("max_msg_size", 0400, trans,
                       (u32 *)&info->desc->max_msg_size);

    debugfs_create_u32("tx_max_msg", 0400, trans,
                       (u32 *)&info->tx_minfo.max_msg);

    debugfs_create_u32("rx_max_msg", 0400, trans,
                       (u32 *)&info->rx_minfo.max_msg);
    ...
    dbg->top_dentry = top_dentry;

    if (devm_add_action_or_reset(info->dev,
                                 scmi_debugfs_common_cleanup, dbg))
        return NULL;

/* in scmi_debugfs_raw_mode_setup() */
    if (!info->dbg)
        return -EINVAL;

    /* Enumerate all channels to collect their ids */
    idr_for_each_entry(&info->tx_idr, cinfo, id) {
        /* Cannot happen, but be defensive. */
        if (num_chans >= SCMI_MAX_CHANNELS) {
            dev_warn(info->dev,
                     "SCMI RAW - Error enumerating channels\n");
            break;
        }

        if (!test_bit(cinfo->id, protos)) {
            channels[num_chans++] = cinfo->id;
            set_bit(cinfo->id, protos);
        }
    }

    info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
                                   info->id, channels, num_chans,
                                   info->desc, info->tx_minfo.max_msg);
    if (IS_ERR(info->raw)) {
        dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
        ret = PTR_ERR(info->raw);
        info->raw = NULL;
    }

/* in scmi_transport_setup() */
    if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
        return NULL;

    if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
        ...
        return NULL;
    }

    /* Provide core transport ops */
    *trans->core_ops = &scmi_trans_core_ops;

    dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));

    ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
                               &trans->desc->max_rx_timeout_ms);
    if (ret && ret != -EINVAL)
        dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");

    ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
                               &trans->desc->max_msg_size);
    if (ret && ret != -EINVAL)
        dev_err(dev, "Malformed arm,max-msg-size DT property.\n");

    ret = of_property_read_u32(dev->of_node, "arm,max-msg",
                               &trans->desc->max_msg);
    if (ret && ret != -EINVAL)
        dev_err(dev, "Malformed arm,max-msg DT property.\n");

    dev_info(dev,
             "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
             trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
             trans->desc->max_msg);

    /* System wide atomic threshold for atomic ops: only if set in DT */
    if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
                              &trans->desc->atomic_threshold))
        dev_info(dev,
                 ...,
                 trans->desc->atomic_threshold);

    return trans->desc;
/* in scmi_probe() */
    struct device *dev = &pdev->dev;
    struct device_node *child, *np = dev->of_node;
    ...
        ret = -EINVAL;
    ...
    info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
    if (!info)
        return -ENOMEM;

    info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
    if (info->id < 0)
        return info->id;

    info->dev = dev;
    info->desc = desc;
    info->bus_nb.notifier_call = scmi_bus_notifier;
    info->dev_req_nb.notifier_call = scmi_device_request_notifier;
    INIT_LIST_HEAD(&info->node);
    idr_init(&info->protocols);
    mutex_init(&info->protocols_mtx);
    idr_init(&info->active_protocols);
    mutex_init(&info->devreq_mtx);
    ...
    idr_init(&info->tx_idr);
    idr_init(&info->rx_idr);

    handle = &info->handle;
    handle->dev = info->dev;
    handle->version = &info->version;
    handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
    handle->devm_protocol_get = scmi_devm_protocol_get;
    handle->devm_protocol_put = scmi_devm_protocol_put;
    handle->is_transport_atomic = scmi_is_transport_atomic;
    ...
    ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
    if (ret)
        ...
    /* Register a notifier for protocol-device requests */
    ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
                                           &info->dev_req_nb);
    ...
    info->dbg = scmi_debugfs_common_setup(info);
    if (!info->dbg)
        ...
    ...
    if (info->desc->atomic_enabled &&
        !is_transport_polling_capable(info->desc))
        ...
    ...
    list_add_tail(&info->node, &scmi_list);
    ...
        ret = idr_alloc(&info->active_protocols, child,
                        prot_id, prot_id + 1, GFP_KERNEL);
    ...
    /* error path */
    scmi_raw_mode_cleanup(info->raw);
    scmi_notification_exit(&info->handle);
    ...
    blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
                                       &info->dev_req_nb);
    ...
    bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
    ...
    ida_free(&scmi_id, info->id);

/* in scmi_remove() */
    scmi_raw_mode_cleanup(info->raw);
    ...
    if (info->users)
        dev_warn(&pdev->dev,
                 ...);
    list_del(&info->node);
    ...
    scmi_notification_exit(&info->handle);

    mutex_lock(&info->protocols_mtx);
    idr_destroy(&info->protocols);
    mutex_unlock(&info->protocols_mtx);

    idr_for_each_entry(&info->active_protocols, child, id)
        of_node_put(child);
    idr_destroy(&info->active_protocols);

    blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
                                       &info->dev_req_nb);
    bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
    ...
    ida_free(&scmi_id, info->id);

/* in protocol_version_show() */
    return sprintf(buf, "%u.%u\n", info->version.major_ver,
                   info->version.minor_ver);

/* in firmware_version_show() */
    return sprintf(buf, "0x%x\n", info->version.impl_ver);

/* in vendor_id_show() */
    return sprintf(buf, "%s\n", info->version.vendor_id);

/* in sub_vendor_id_show() */
    return sprintf(buf, "%s\n", info->version.sub_vendor_id);

static struct platform_driver scmi_driver = {
    .driver = {
        .name = "arm-scmi",
        ...
    },
    ...
};

/* in scmi_driver_init() */
    /* Bail out if no SCMI transport was configured */
    if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
        return -EINVAL;

MODULE_ALIAS("platform:arm-scmi");