Lines Matching +full:mhu +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
6 * and the Application Processors (AP). The Message Handling Unit (MHU)
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2024 ARM Ltd.
25 #include <linux/io-64-nonatomic-hi-lo.h>
47 #define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
63 * struct scmi_xfers_info - Structure to manage transfer information
71 * a number of xfers equal to the maximum allowed in-flight
74 * currently in-flight messages.
85 * struct scmi_protocol_instance - Describe an initialized protocol instance.
88 * @gid: A reference for per-protocol devres management.
96 * This field is NON-zero when a successful negotiation
118 * struct scmi_debug_info - Debug common info
134 * struct scmi_info - Structure representing a SCMI instance
140 * implementation version and (sub-)vendor identification.
145 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
286 proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
287 version->sub_vendor_id,
288 version->impl_ver);
293 protocol_id, version->vendor_id);
297 protocol_id, version->vendor_id);
306 version->vendor_id,
307 version->sub_vendor_id,
308 version->impl_ver);
312 pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
313 protocol_id, proto->vendor_id ?: "",
314 proto->sub_vendor_id ?: "", proto->impl_ver);
329 if (!proto || !try_module_get(proto->owner)) {
342 module_put(proto->owner);
347 if (!proto->vendor_id) {
348 pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
349 return -EINVAL;
352 if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
353 pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
354 return -EINVAL;
357 if (proto->sub_vendor_id &&
358 strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
360 proto->id);
361 return -EINVAL;
374 return -EINVAL;
377 if (!proto->instance_init) {
378 pr_err("missing init for protocol 0x%x\n", proto->id);
379 return -EINVAL;
382 if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
384 return -EINVAL;
390 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
391 proto->sub_vendor_id,
392 proto->impl_ver);
394 return -EINVAL;
398 pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
399 proto->id, ret);
403 pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
404 proto->id, proto->vendor_id, proto->sub_vendor_id,
405 proto->impl_ver);
415 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
416 proto->sub_vendor_id,
417 proto->impl_ver);
423 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
428 * scmi_create_protocol_devices - Create devices for all pending requests for
444 mutex_lock(&info->devreq_mtx);
445 sdev = scmi_device_create(np, info->dev, prot_id, name);
447 dev_err(info->dev,
450 mutex_unlock(&info->devreq_mtx);
456 mutex_lock(&info->devreq_mtx);
457 scmi_device_destroy(info->dev, prot_id, name);
458 mutex_unlock(&info->devreq_mtx);
466 info->notify_priv = priv;
477 return info->notify_priv;
481 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
483 * @minfo: Pointer to Tx/Rx Message management info based on channel type
487 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
488 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
489 * of incorrect association of a late and expired xfer with a live in-flight
490 * transaction, both happening to re-use the same token identifier.
492 * Since the platform is NOT required to answer our request in order, we should
495 * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
498 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
502 * X = used in-flight
505 * ------
507 * |- xfer_id picked
508 * -----------+----------------------------------------------------------
510 * ----------------------------------------------------------------------
512 * |- next_token
514 * Out-of-order pending at start
515 * -----------------------------
517 * |- xfer_id picked, last_token fixed
518 * -----+----------------------------------------------------------------
520 * ----------------------------------------------------------------------
522 * |- next_token
525 * Out-of-order pending at end
526 * ---------------------------
528 * |- xfer_id picked, last_token fixed
529 * -----+----------------------------------------------------------------
531 * ----------------------------------------------------------------------
533 * |- next_token
545 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
546 * using the pre-allocated transfer_id as a base.
552 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
555 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
559 * After heavily out-of-order responses, there are no free
563 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
567 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
568 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
571 return -ENOMEM;
574 /* Update +/- last_token accordingly if we skipped some holes */
576 atomic_add((int)(xfer_id - next_token), &transfer_last_id);
578 xfer->hdr.seq = (u16)xfer_id;
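/*
 * A worked example of the search above, assuming 10-bit tokens
 * (MSG_TOKEN_MAX == 1024): with xfer->transfer_id == 2051 the candidate is
 * next_token = 2051 & 1023 = 3; if tokens 3 and 4 are still in-flight,
 * find_next_zero_bit() lands on xfer_id = 5, transfer_last_id is advanced
 * by (5 - 3) = 2 and hdr.seq becomes 5. Only if the wrap-around search
 * from 0 also finds no free bit does this path return -ENOMEM.
 */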
584 * scmi_xfer_token_clear - Release the token
586 * @minfo: Pointer to Tx/Rx Message management info based on channel type
592 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
596 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
599 * @minfo: Pointer to Tx/Rx Message management info based on channel type
601 * Note that this helper assumes that the xfer to be registered as in-flight
611 /* Set in-flight */
612 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
613 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
614 xfer->pending = true;
618 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
621 * @minfo: Pointer to Tx/Rx Message management info based on channel type
626 * same sequence number is currently still registered as in-flight.
628 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
637 spin_lock_irqsave(&minfo->xfer_lock, flags);
638 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
641 ret = -EBUSY;
642 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
648 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
661 return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
665 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
666 * as pending in-flight
669 * @minfo: Pointer to Tx/Rx Message management info based on channel type
679 spin_lock_irqsave(&minfo->xfer_lock, flags);
684 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
690 * scmi_xfer_get() - Allocate one message
693 * @minfo: Pointer to Tx/Rx Message management info based on channel type
716 spin_lock_irqsave(&minfo->xfer_lock, flags);
717 if (hlist_empty(&minfo->free_xfers)) {
718 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
719 return ERR_PTR(-ENOMEM);
723 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
724 hlist_del_init(&xfer->node);
730 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
732 refcount_set(&xfer->users, 1);
733 atomic_set(&xfer->busy, SCMI_XFER_FREE);
734 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
740 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
746 * Return: A valid xfer on Success, or an error-pointer otherwise
753 xfer = scmi_xfer_get(handle, &info->tx_minfo);
755 xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
761 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
769 * protocol in range is allowed, re-using the Base channel, so as to enable
780 cinfo = idr_find(&info->tx_idr, protocol_id);
783 return ERR_PTR(-EINVAL);
785 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
787 return ERR_PTR(-EINVAL);
788 dev_warn_once(handle->dev,
797 * __scmi_xfer_put() - Release a message
799 * @minfo: Pointer to Tx/Rx Message management info based on channel type
812 spin_lock_irqsave(&minfo->xfer_lock, flags);
813 if (refcount_dec_and_test(&xfer->users)) {
814 if (xfer->pending) {
816 hash_del(&xfer->node);
817 xfer->pending = false;
819 hlist_add_head(&xfer->node, &minfo->free_xfers);
821 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
825 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
837 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
838 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
839 return __scmi_xfer_put(&info->tx_minfo, xfer);
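/*
 * A sketch of the intended pairing of the raw helpers above; error paths
 * trimmed, and the in-flight registration step is needed only before the
 * xfer is actually sent:
 */
xfer = scmi_xfer_raw_get(handle);
if (IS_ERR(xfer))
	return PTR_ERR(xfer);

ret = scmi_xfer_raw_inflight_register(handle, xfer);
if (ret)
	scmi_xfer_raw_put(handle, xfer);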
843 * scmi_xfer_lookup_unlocked - Helper to look up an xfer_id
845 * @minfo: Pointer to Tx/Rx Message management info based on channel type
859 if (test_bit(xfer_id, minfo->xfer_alloc_table))
860 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
862 return xfer ?: ERR_PTR(-EINVAL);
866 * scmi_bad_message_trace - A helper to trace weird messages
874 * timed-out message that arrives and as such, can be traced only referring to
881 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
898 trace_scmi_msg_dump(info->id, cinfo->id,
905 * scmi_msg_response_validate - Validate message type against state of related
914 * related synchronous response (Out-of-Order Delayed Response) the missing
917 * SCMI transport can deliver such out-of-order responses.
919 * Context: Assumes to be called with xfer->lock already acquired.
930 * delayed response we're not prepared to handle: bail out safely
933 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
934 dev_err(cinfo->dev,
936 xfer->hdr.seq);
937 return -EINVAL;
940 switch (xfer->state) {
947 xfer->hdr.status = SCMI_SUCCESS;
948 xfer->state = SCMI_XFER_RESP_OK;
949 complete(&xfer->done);
950 dev_warn(cinfo->dev,
952 xfer->hdr.seq);
957 return -EINVAL;
961 return -EINVAL;
968 * scmi_xfer_state_update - Update xfer state
981 xfer->hdr.type = msg_type;
984 if (xfer->hdr.type == MSG_TYPE_COMMAND)
985 xfer->state = SCMI_XFER_RESP_OK;
987 xfer->state = SCMI_XFER_DRESP_OK;
994 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
1000 * scmi_xfer_command_acquire - Helper to look up and acquire a command xfer
1017 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1018 struct scmi_xfers_info *minfo = &info->tx_minfo;
1023 spin_lock_irqsave(&minfo->xfer_lock, flags);
1026 dev_err(cinfo->dev,
1029 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1032 scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
1036 refcount_inc(&xfer->users);
1037 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1039 spin_lock_irqsave(&xfer->lock, flags);
1052 spin_unlock_irqrestore(&xfer->lock, flags);
1055 dev_err(cinfo->dev,
1056 "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
1057 msg_type, xfer_id, msg_hdr, xfer->state);
1060 scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
1064 xfer = ERR_PTR(-EINVAL);
1073 atomic_set(&xfer->busy, SCMI_XFER_FREE);
1074 __scmi_xfer_put(&info->tx_minfo, xfer);
1080 if (!cinfo->is_p2a) {
1081 dev_warn(cinfo->dev, "Invalid clear on A2P channel!\n");
1085 if (info->desc->ops->clear_channel)
1086 info->desc->ops->clear_channel(cinfo);
1093 struct device *dev = cinfo->dev;
1094 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1095 struct scmi_xfers_info *minfo = &info->rx_minfo;
1099 xfer = scmi_xfer_get(cinfo->handle, minfo);
1105 scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
1111 unpack_scmi_header(msg_hdr, &xfer->hdr);
1113 /* Ensure order between xfer->priv store and following ops */
1114 smp_store_mb(xfer->priv, priv);
1115 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
1118 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1119 xfer->hdr.id, "NOTI", xfer->hdr.seq,
1120 xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
1121 scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
1123 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
1124 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
1126 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1127 xfer->hdr.protocol_id, xfer->hdr.seq,
1131 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
1132 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
1133 cinfo->id);
1145 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1150 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
1157 /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
1158 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1159 xfer->rx.len = info->desc->max_msg_size;
1162 /* Ensure order between xfer->priv store and following ops */
1163 smp_store_mb(xfer->priv, priv);
1164 info->desc->ops->fetch_response(cinfo, xfer);
1166 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1167 xfer->hdr.id,
1168 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
1171 xfer->hdr.seq, xfer->hdr.status,
1172 xfer->rx.buf, xfer->rx.len);
1174 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1175 xfer->hdr.protocol_id, xfer->hdr.seq,
1176 xfer->hdr.type);
1178 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
1180 complete(xfer->async_done);
1181 scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
1183 complete(&xfer->done);
1184 scmi_inc_count(info->dbg->counters, RESPONSE_OK);
1190 * RX path since it will already be queued at the end of the TX
1193 if (!xfer->hdr.poll_completion)
1194 scmi_raw_message_report(info->raw, xfer,
1196 cinfo->id);
1203 * scmi_rx_callback() - callback for receiving messages
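/*
 * A sketch of the dispatch performed by scmi_rx_callback(), based on the
 * notification/response handlers above; the exact switch shape is
 * illustrative:
 */
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

switch (msg_type) {
case MSG_TYPE_NOTIFICATION:
	scmi_handle_notification(cinfo, msg_hdr, priv);
	break;
case MSG_TYPE_COMMAND:
case MSG_TYPE_DELAYED_RESP:
	scmi_handle_response(cinfo, msg_hdr, priv);
	break;
default:
	WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
	break;
}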
1236 * xfer_put() - Release a transmit message
1245 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1247 __scmi_xfer_put(&info->tx_minfo, xfer);
1253 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1256 * Poll also on xfer->done so that polling can be forcibly terminated
1257 * in case of out-of-order receptions of delayed responses
1259 return info->desc->ops->poll_done(cinfo, xfer) ||
1260 try_wait_for_completion(&xfer->done) ||
1269 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1271 if (xfer->hdr.poll_completion) {
1276 if (!desc->sync_cmds_completed_on_ret) {
1287 "timed out in resp(caller: %pS) - polling\n",
1289 ret = -ETIMEDOUT;
1290 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
1298 * Do not fetch_response if an out-of-order delayed
1301 spin_lock_irqsave(&xfer->lock, flags);
1302 if (xfer->state == SCMI_XFER_SENT_OK) {
1303 desc->ops->fetch_response(cinfo, xfer);
1304 xfer->state = SCMI_XFER_RESP_OK;
1306 spin_unlock_irqrestore(&xfer->lock, flags);
1309 trace_scmi_msg_dump(info->id, cinfo->id,
1310 xfer->hdr.protocol_id, xfer->hdr.id,
1313 xfer->hdr.seq, xfer->hdr.status,
1314 xfer->rx.buf, xfer->rx.len);
1315 scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
1318 scmi_raw_message_report(info->raw, xfer,
1320 cinfo->id);
1325 if (!wait_for_completion_timeout(&xfer->done,
1329 ret = -ETIMEDOUT;
1330 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
1338 * scmi_wait_for_message_response - A helper to group all the possible ways of
1344 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1345 * configuration flags like xfer->hdr.poll_completion.
1352 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1353 struct device *dev = info->dev;
1355 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1356 xfer->hdr.protocol_id, xfer->hdr.seq,
1357 info->desc->max_rx_timeout_ms,
1358 xfer->hdr.poll_completion);
1360 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1361 info->desc->max_rx_timeout_ms);
1365 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1379 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1380 struct device *dev = info->dev;
1382 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1384 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1385 pack_scmi_header(&xfer->hdr));
1391 * do_xfer() - Do one transfer
1396 * Return: -ETIMEDOUT in case of no response, if transmit error,
1405 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1406 struct device *dev = info->dev;
1410 if (xfer->hdr.poll_completion &&
1411 !is_transport_polling_capable(info->desc)) {
1414 scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
1415 return -EINVAL;
1418 cinfo = idr_find(&info->tx_idr, pi->proto->id);
1420 scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
1421 return -EINVAL;
1424 if (is_polling_enabled(cinfo, info->desc))
1425 xfer->hdr.poll_completion = true;
1432 xfer->hdr.protocol_id = pi->proto->id;
1433 reinit_completion(&xfer->done);
1435 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1436 xfer->hdr.protocol_id, xfer->hdr.seq,
1437 xfer->hdr.poll_completion);
1440 xfer->hdr.status = SCMI_SUCCESS;
1441 xfer->state = SCMI_XFER_SENT_OK;
1444 * on xfer->state due to the monotonically increasing token allocation,
1445 * we must still ensure xfer->state initialization is not re-ordered
1446 * after the .send_message() to be sure that on the RX path an early
1447 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1451 ret = info->desc->ops->send_message(cinfo, xfer);
1454 scmi_inc_count(info->dbg->counters, SENT_FAIL);
1458 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1459 xfer->hdr.id, "CMND", xfer->hdr.seq,
1460 xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1461 scmi_inc_count(info->dbg->counters, SENT_OK);
1464 if (!ret && xfer->hdr.status) {
1465 ret = scmi_to_linux_errno(xfer->hdr.status);
1466 scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
1469 if (info->desc->ops->mark_txdone)
1470 info->desc->ops->mark_txdone(cinfo, ret, xfer);
1472 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1473 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
1482 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1484 xfer->rx.len = info->desc->max_msg_size;
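/*
 * A minimal sketch of the canonical command flow that protocol
 * implementations build on the xfer ops above; the message id, sizes and
 * payload layout are hypothetical:
 */
static int scmi_example_attributes_get(const struct scmi_protocol_handle *ph,
				       u32 domain, u32 *attr)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, 0x3 /* hypothetical msg_id */,
				      sizeof(__le32), sizeof(__le32), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*attr = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);

	return ret;
}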
1488 * do_xfer_with_response() - Do one transfer and wait until the delayed
1495 * it could cause long busy-waiting here, so ignore polling for the delayed
1508 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1517 xfer->async_done = &async_response;
1525 WARN_ON_ONCE(xfer->hdr.poll_completion);
1529 if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1530 dev_err(ph->dev,
1533 ret = -ETIMEDOUT;
1534 } else if (xfer->hdr.status) {
1535 ret = scmi_to_linux_errno(xfer->hdr.status);
1539 xfer->async_done = NULL;
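/*
 * For asynchronous commands only the send step differs: reusing the
 * illustrative command above, a caller would issue
 *
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *
 * which transmits and then sleeps on xfer->async_done until the delayed
 * response arrives or the internal timeout expires.
 */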
1544 * xfer_get_init() - Allocate and initialise one message for transmit
1565 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1566 struct scmi_xfers_info *minfo = &info->tx_minfo;
1567 struct device *dev = info->dev;
1570 if (rx_size > info->desc->max_msg_size ||
1571 tx_size > info->desc->max_msg_size)
1572 return -ERANGE;
1574 xfer = scmi_xfer_get(pi->handle, minfo);
1581 /* Pick a sequence number and register this xfer as in-flight */
1584 dev_err(pi->handle->dev,
1590 xfer->tx.len = tx_size;
1591 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1592 xfer->hdr.type = MSG_TYPE_COMMAND;
1593 xfer->hdr.id = msg_id;
1594 xfer->hdr.poll_completion = false;
1602 * version_get() - command to get the revision of the SCMI entity
1623 rev_info = t->rx.buf;
1632 * scmi_set_protocol_priv - Set protocol specific data at init time
1645 pi->priv = priv;
1646 pi->version = version;
1652 * scmi_get_protocol_priv - Get protocol specific data set at init time
1662 return pi->priv;
1680 * scmi_common_extended_name_get - Common helper to get extended resources name
1701 ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1705 put_unaligned_le32(res_id, t->tx.buf);
1707 put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1708 resp = t->rx.buf;
1710 ret = ph->xops->do_xfer(ph, t);
1712 strscpy(name, resp->name, len);
1714 ph->xops->xfer_put(ph, t);
1717 dev_warn(ph->dev,
1718 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1724 * scmi_common_get_max_msg_size - Get maximum message size
1732 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1734 return info->desc->max_msg_size;
1738 * struct scmi_iterator - Iterator descriptor
1740 * a proper custom command payload for each multi-part command request.
1741 * @resp: A reference to the response RX buffer; used by @update_state and
1742 * @process_response to parse the multi-part replies.
1748 * internal routines and by the caller-provided @scmi_iterator_ops.
1770 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1772 return ERR_PTR(-ENOMEM);
1774 i->ph = ph;
1775 i->ops = ops;
1776 i->priv = priv;
1778 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1780 devm_kfree(ph->dev, i);
1784 i->state.max_resources = max_resources;
1785 i->msg = i->t->tx.buf;
1786 i->resp = i->t->rx.buf;
1793 int ret = -EINVAL;
1799 if (!i || !i->ops || !i->ph)
1802 iops = i->ops;
1803 ph = i->ph;
1804 st = &i->state;
1807 iops->prepare_message(i->msg, st->desc_index, i->priv);
1808 ret = ph->xops->do_xfer(ph, i->t);
1812 st->rx_len = i->t->rx.len;
1813 ret = iops->update_state(st, i->resp, i->priv);
1817 if (st->num_returned > st->max_resources - st->desc_index) {
1818 dev_err(ph->dev,
1820 st->max_resources);
1821 ret = -EINVAL;
1825 for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1826 st->loop_idx++) {
1827 ret = iops->process_response(ph, i->resp, st, i->priv);
1832 st->desc_index += st->num_returned;
1833 ph->xops->reset_rx_to_maxsz(ph, i->t);
1838 } while (st->num_returned && st->num_remaining);
1842 ph->xops->xfer_put(ph, i->t);
1843 devm_kfree(ph->dev, i);
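/*
 * A sketch of how a protocol typically drives this iterator for a
 * multi-part enumeration; the callback bodies, reply layout and the
 * MY_PROTO_DESCRIBE message id are illustrative assumptions:
 */
static void iter_example_prepare_message(void *message,
					 unsigned int desc_index,
					 const void *priv)
{
	__le32 *msg = message;

	*msg = cpu_to_le32(desc_index);	/* first entry wanted in this chunk */
}

static int iter_example_update_state(struct scmi_iterator_state *st,
				     const void *response, void *priv)
{
	const __le32 *counts = response;	/* hypothetical reply layout */

	st->num_returned = le32_to_cpu(*counts) & 0xfff;
	st->num_remaining = le32_to_cpu(*counts) >> 16;

	return 0;
}

static int iter_example_process_response(const struct scmi_protocol_handle *ph,
					 const void *response,
					 struct scmi_iterator_state *st,
					 void *priv)
{
	/* consume entry st->loop_idx of this chunk into priv */
	return 0;
}

static int scmi_example_enumerate(const struct scmi_protocol_handle *ph,
				  unsigned int max_resources, void *priv)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_example_prepare_message,
		.update_state = iter_example_update_state,
		.process_response = iter_example_process_response,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, max_resources,
					    MY_PROTO_DESCRIBE,
					    sizeof(__le32), priv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	return ph->hops->iter_response_run(iter);
}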
1887 ret = -EINVAL;
1891 ret = ph->xops->xfer_get_init(ph, describe_id,
1896 info = t->tx.buf;
1897 info->domain = cpu_to_le32(domain);
1898 info->message_id = cpu_to_le32(message_id);
1905 ret = ph->xops->do_xfer(ph, t);
1909 resp = t->rx.buf;
1910 flags = le32_to_cpu(resp->attr);
1911 size = le32_to_cpu(resp->chan_size);
1913 ret = -EINVAL;
1918 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1920 phys_addr = le32_to_cpu(resp->chan_addr_low);
1921 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1922 addr = devm_ioremap(ph->dev, phys_addr, size);
1924 ret = -EADDRNOTAVAIL;
1931 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1933 ret = -ENOMEM;
1938 phys_addr = le32_to_cpu(resp->db_addr_low);
1939 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1940 addr = devm_ioremap(ph->dev, phys_addr, size);
1942 ret = -EADDRNOTAVAIL;
1946 db->addr = addr;
1947 db->width = size;
1948 db->set = le32_to_cpu(resp->db_set_lmask);
1949 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1950 db->mask = le32_to_cpu(resp->db_preserve_lmask);
1951 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1956 ph->xops->xfer_put(ph, t);
1958 dev_dbg(ph->dev,
1960 pi->proto->id, message_id, domain);
1965 devm_kfree(ph->dev, db);
1971 ph->xops->xfer_put(ph, t);
1974 dev_warn(ph->dev,
1975 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1976 pi->proto->id, message_id, domain, ret);
1983 if (db->mask) \
1984 val = ioread##w(db->addr) & db->mask; \
1985 iowrite##w((u##w)db->set | val, db->addr); \
1990 if (!db || !db->addr)
1993 if (db->width == 1)
1995 else if (db->width == 2)
1997 else if (db->width == 4)
1999 else /* db->width == 8 */
2006 if (db->mask)
2007 val = ioread64_hi_lo(db->addr) & db->mask;
2008 iowrite64_hi_lo(db->set | val, db->addr);
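/*
 * A sketch of typical fastchannel setup and use from a protocol, assuming
 * the hops->fastchannel_init/fastchannel_db_ring wrappers around the
 * helpers above; the describe/set ids and the level write are
 * illustrative:
 */
void __iomem *set_addr;
struct scmi_fc_db_info *set_db;
u32 rate_limit;

ph->hops->fastchannel_init(ph, MY_DESCRIBE_FC, MY_LEVEL_SET, sizeof(u32),
			   domain, &set_addr, &set_db, &rate_limit);
if (set_addr) {
	iowrite32(level, set_addr);
	ph->hops->fastchannel_db_ring(set_db);
}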
2014 * scmi_protocol_msg_check - Check protocol message attributes
2037 put_unaligned_le32(message_id, t->tx.buf);
2040 *attributes = get_unaligned_le32(t->rx.buf);
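/*
 * A sketch of probing an optional command before use, assuming this check
 * is exposed as hops->protocol_msg_check; MY_OPTIONAL_CMD is hypothetical
 * and a zero return means the platform implements the command:
 */
bool has_optional_cmd;

has_optional_cmd = !ph->hops->protocol_msg_check(ph, MY_OPTIONAL_CMD, NULL);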
2057 * scmi_revision_area_get - Retrieve version memory area.
2072 return pi->handle->version;
2076 * scmi_protocol_version_negotiate - Negotiate protocol version
2103 put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2106 pi->negotiated_version = pi->proto->supported_version;
2114 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2121 * all resource management is handled via a dedicated per-protocol devres
2133 int ret = -ENOMEM;
2136 const struct scmi_handle *handle = &info->handle;
2139 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2145 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2149 pi->gid = gid;
2150 pi->proto = proto;
2151 pi->handle = handle;
2152 pi->ph.dev = handle->dev;
2153 pi->ph.xops = &xfer_ops;
2154 pi->ph.hops = &helpers_ops;
2155 pi->ph.set_priv = scmi_set_protocol_priv;
2156 pi->ph.get_priv = scmi_get_protocol_priv;
2157 refcount_set(&pi->users, 1);
2158 /* proto->instance_init is assured NON-NULL by scmi_protocol_register */
2159 ret = pi->proto->instance_init(&pi->ph);
2163 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2165 if (ret != proto->id)
2172 if (pi->proto->events) {
2173 ret = scmi_register_protocol_events(handle, pi->proto->id,
2174 &pi->ph,
2175 pi->proto->events);
2177 dev_warn(handle->dev,
2178 "Protocol:%X - Events Registration Failed - err:%d\n",
2179 pi->proto->id, ret);
2182 devres_close_group(handle->dev, pi->gid);
2183 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2185 if (pi->version > proto->supported_version) {
2186 ret = scmi_protocol_version_negotiate(&pi->ph);
2188 dev_info(handle->dev,
2190 proto->id, pi->negotiated_version);
2192 dev_warn(handle->dev,
2194 pi->version, pi->proto->id);
2195 dev_warn(handle->dev,
2197 pi->proto->supported_version);
2206 devres_release_group(handle->dev, gid);
2212 * scmi_get_protocol_instance - Protocol initialization helper.
2218 * resource allocation with a dedicated per-protocol devres subgroup.
2221 * in particular returns -EPROBE_DEFER when the desired protocol could
2230 mutex_lock(&info->protocols_mtx);
2231 pi = idr_find(&info->protocols, protocol_id);
2234 refcount_inc(&pi->users);
2239 proto = scmi_protocol_get(protocol_id, &info->version);
2243 pi = ERR_PTR(-EPROBE_DEFER);
2245 mutex_unlock(&info->protocols_mtx);
2251 * scmi_protocol_acquire - Protocol acquire
2266 * scmi_protocol_release - Protocol de-initialization helper.
2270 * Remove one user for the specified protocol and trigger de-initialization
2271 * and resource de-allocation once the last user has gone.
2278 mutex_lock(&info->protocols_mtx);
2279 pi = idr_find(&info->protocols, protocol_id);
2283 if (refcount_dec_and_test(&pi->users)) {
2284 void *gid = pi->gid;
2286 if (pi->proto->events)
2289 if (pi->proto->instance_deinit)
2290 pi->proto->instance_deinit(&pi->ph);
2292 idr_remove(&info->protocols, protocol_id);
2294 scmi_protocol_put(pi->proto);
2296 devres_release_group(handle->dev, gid);
2297 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2302 mutex_unlock(&info->protocols_mtx);
2309 struct scmi_info *info = handle_to_scmi_info(pi->handle);
2311 info->protocols_imp = prot_imp;
2319 struct scmi_revision_info *rev = handle->version;
2321 if (!info->protocols_imp)
2324 for (i = 0; i < rev->num_protocols; i++)
2325 if (info->protocols_imp[i] == prot_id)
2339 scmi_protocol_release(dres->handle, dres->protocol_id);
2351 return ERR_PTR(-ENOMEM);
2353 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2359 dres->handle = sdev->handle;
2360 dres->protocol_id = protocol_id;
2361 devres_add(&sdev->dev, dres);
2367 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2378 * released, and possibly de-initialized on last user, once the SCMI driver
2391 return ERR_PTR(-EINVAL);
2397 *ph = &pi->ph;
2399 return pi->proto->ops;
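/*
 * A sketch of how an SCMI driver typically binds a protocol at probe time
 * through this devres-managed getter; using the clock protocol here is
 * just an example:
 */
static int scmi_example_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct scmi_clk_proto_ops *clk_ops;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* clk_ops and ph stay valid; release is devres-managed */
	return 0;
}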
2403 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2413 * released, and possibly de-initialized on last user, once the SCMI driver
2437 return dres->protocol_id == *((u8 *)data);
2441 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2453 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2459 * scmi_is_transport_atomic - Method to check if underlying transport for an
2474 ret = info->desc->atomic_enabled &&
2475 is_transport_polling_capable(info->desc);
2477 *atomic_threshold = info->desc->atomic_threshold;
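/*
 * A sketch of a consumer-side query, as done by e.g. the SCMI cpufreq and
 * clock drivers; the variable names are illustrative:
 */
unsigned int atomic_threshold_us;
bool atomic_capable;

atomic_capable = handle->is_transport_atomic(handle, &atomic_threshold_us);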
2483 * scmi_handle_get() - Get the SCMI handle for a device
2502 if (dev->parent == info->dev) {
2503 info->users++;
2504 handle = &info->handle;
2514 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2523 * if null was passed, it returns -EINVAL;
2530 return -EINVAL;
2534 if (!WARN_ON(!info->users))
2535 info->users--;
2553 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2554 if (scmi_dev->handle)
2555 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2563 struct device *dev = sinfo->dev;
2564 const struct scmi_desc *desc = sinfo->desc;
2566 /* Pre-allocated messages, no more than what hdr.seq can support */
2567 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2569 "Invalid maximum messages %d, not in range [1 - %lu]\n",
2570 info->max_msg, MSG_TOKEN_MAX);
2571 return -EINVAL;
2574 hash_init(info->pending_xfers);
2577 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2579 if (!info->xfer_alloc_table)
2580 return -ENOMEM;
2584 * pre-initialize the buffer pointer to pre-allocated buffers and
2587 INIT_HLIST_HEAD(&info->free_xfers);
2588 for (i = 0; i < info->max_msg; i++) {
2591 return -ENOMEM;
2593 xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
2595 if (!xfer->rx.buf)
2596 return -ENOMEM;
2598 xfer->tx.buf = xfer->rx.buf;
2599 init_completion(&xfer->done);
2600 spin_lock_init(&xfer->lock);
2603 hlist_add_head(&xfer->node, &info->free_xfers);
2606 spin_lock_init(&info->xfer_lock);
2613 const struct scmi_desc *desc = sinfo->desc;
2615 if (!desc->ops->get_max_msg) {
2616 sinfo->tx_minfo.max_msg = desc->max_msg;
2617 sinfo->rx_minfo.max_msg = desc->max_msg;
2621 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2623 return -EINVAL;
2624 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2626 /* RX channel is optional so can be skipped */
2627 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2629 sinfo->rx_minfo.max_msg =
2630 desc->ops->get_max_msg(base_cinfo);
2644 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2645 if (!ret && !idr_is_empty(&sinfo->rx_idr))
2646 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2662 idr = tx ? &info->tx_idr : &info->rx_idr;
2664 if (!info->desc->ops->chan_available(of_node, idx)) {
2666 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2667 return -EINVAL;
2671 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2673 return -ENOMEM;
2675 cinfo->is_p2a = !tx;
2676 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2677 cinfo->max_msg_size = info->desc->max_msg_size;
2681 idx ? "rx" : "tx", prot_id);
2683 tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2685 dev_err(info->dev,
2687 devm_kfree(info->dev, cinfo);
2688 return -EINVAL;
2692 cinfo->id = prot_id;
2693 cinfo->dev = &tdev->dev;
2694 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2697 scmi_device_destroy(info->dev, prot_id, name);
2698 devm_kfree(info->dev, cinfo);
2702 if (tx && is_polling_required(cinfo, info->desc)) {
2703 if (is_transport_polling_capable(info->desc))
2704 dev_info(&tdev->dev,
2705 "Enabled polling mode TX channel - prot_id:%d\n",
2708 dev_warn(&tdev->dev,
2715 dev_err(info->dev,
2720 scmi_device_destroy(info->dev, prot_id, name);
2721 devm_kfree(info->dev, cinfo);
2726 cinfo->handle = &info->handle;
2737 /* Rx is optional, report only memory errors */
2739 if (ret && ret != -ENOMEM)
2744 dev_err(info->dev,
2751 * scmi_channels_setup - Helper to initialize all required channels
2761 * Note that, even though a pair of TX/RX channels is associated to each
2771 struct device_node *top_np = info->dev->of_node;
2785 dev_err(info->dev,
2800 if (cinfo->dev) {
2801 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2802 struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2804 of_node_put(cinfo->dev->of_node);
2805 scmi_device_destroy(info->dev, id, sdev->name);
2806 cinfo->dev = NULL;
2817 idr_for_each(idr, info->desc->ops->chan_free, idr);
2827 scmi_cleanup_channels(info, &info->tx_idr);
2829 scmi_cleanup_channels(info, &info->rx_idr);
2839 if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2840 sdev->dev.parent != info->dev)
2849 scmi_handle_put(sdev->handle);
2850 sdev->handle = NULL;
2856 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2857 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2870 np = idr_find(&info->active_protocols, id_table->protocol_id);
2874 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2875 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2876 id_table->name, id_table->protocol_id);
2880 scmi_create_protocol_devices(np, info, id_table->protocol_id,
2881 id_table->name);
2884 scmi_destroy_protocol_devices(info, id_table->protocol_id,
2885 id_table->name);
2914 struct scmi_debug_info *dbg = filp->private_data;
2917 atomic_set(&dbg->counters[i], 0);
2938 &dbg->counters[idx]);
2950 debugfs_remove_recursive(dbg->top_dentry);
2951 kfree(dbg->name);
2952 kfree(dbg->type);
2962 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2966 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2967 if (!dbg->name) {
2968 devm_kfree(info->dev, dbg);
2972 of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2973 dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2974 if (!dbg->type) {
2975 kfree(dbg->name);
2976 devm_kfree(info->dev, dbg);
2980 snprintf(top_dir, 16, "%d", info->id);
2984 dbg->is_atomic = info->desc->atomic_enabled &&
2985 is_transport_polling_capable(info->desc);
2988 (char **)&dbg->name);
2991 (u32 *)&info->desc->atomic_threshold);
2993 debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2995 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2998 (u32 *)&info->desc->max_rx_timeout_ms);
3001 (u32 *)&info->desc->max_msg_size);
3004 (u32 *)&info->tx_minfo.max_msg);
3007 (u32 *)&info->rx_minfo.max_msg);
3012 dbg->top_dentry = top_dentry;
3014 if (devm_add_action_or_reset(info->dev,
3028 if (!info->dbg)
3029 return -EINVAL;
3032 idr_for_each_entry(&info->tx_idr, cinfo, id) {
3038 dev_warn(info->dev,
3039 "SCMI RAW - Error enumerating channels\n");
3043 if (!test_bit(cinfo->id, protos)) {
3044 channels[num_chans++] = cinfo->id;
3045 set_bit(cinfo->id, protos);
3049 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3050 info->id, channels, num_chans,
3051 info->desc, info->tx_minfo.max_msg);
3052 if (IS_ERR(info->raw)) {
3053 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
3054 ret = PTR_ERR(info->raw);
3055 info->raw = NULL;
3067 if (!trans || !trans->supplier || !trans->core_ops)
3070 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3077 *trans->core_ops = &scmi_trans_core_ops;
3079 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3081 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3082 &trans->desc.max_rx_timeout_ms);
3083 if (ret && ret != -EINVAL)
3084 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3086 ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3087 &trans->desc.max_msg_size);
3088 if (ret && ret != -EINVAL)
3089 dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3091 ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3092 &trans->desc.max_msg);
3093 if (ret && ret != -EINVAL)
3094 dev_err(dev, "Malformed arm,max-msg DT property.\n");
3097 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3098 trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
3099 trans->desc.max_msg);
3102 if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3103 &trans->desc.atomic_threshold))
3106 trans->desc.atomic_threshold);
3108 return &trans->desc;
3119 struct device *dev = &pdev->dev;
3120 struct device_node *child, *np = dev->of_node;
3125 ret = -EINVAL;
3131 return -ENOMEM;
3133 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3134 if (info->id < 0)
3135 return info->id;
3137 info->dev = dev;
3138 info->desc = desc;
3139 info->bus_nb.notifier_call = scmi_bus_notifier;
3140 info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3141 INIT_LIST_HEAD(&info->node);
3142 idr_init(&info->protocols);
3143 mutex_init(&info->protocols_mtx);
3144 idr_init(&info->active_protocols);
3145 mutex_init(&info->devreq_mtx);
3148 idr_init(&info->tx_idr);
3149 idr_init(&info->rx_idr);
3151 handle = &info->handle;
3152 handle->dev = info->dev;
3153 handle->version = &info->version;
3154 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3155 handle->devm_protocol_get = scmi_devm_protocol_get;
3156 handle->devm_protocol_put = scmi_devm_protocol_put;
3157 handle->is_transport_atomic = scmi_is_transport_atomic;
3166 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3173 &info->dev_req_nb);
3186 info->dbg = scmi_debugfs_common_setup(info);
3187 if (!info->dbg)
3208 if (info->desc->atomic_enabled &&
3209 !is_transport_polling_capable(info->desc))
3229 list_add_tail(&info->node, &scmi_list);
3251 ret = idr_alloc(&info->active_protocols, child,
3267 scmi_raw_mode_cleanup(info->raw);
3268 scmi_notification_exit(&info->handle);
3271 &info->dev_req_nb);
3273 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3277 ida_free(&scmi_id, info->id);
3290 scmi_raw_mode_cleanup(info->raw);
3293 if (info->users)
3294 dev_warn(&pdev->dev,
3296 list_del(&info->node);
3299 scmi_notification_exit(&info->handle);
3301 mutex_lock(&info->protocols_mtx);
3302 idr_destroy(&info->protocols);
3303 mutex_unlock(&info->protocols_mtx);
3305 idr_for_each_entry(&info->active_protocols, child, id)
3307 idr_destroy(&info->active_protocols);
3310 &info->dev_req_nb);
3311 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3316 ida_free(&scmi_id, info->id);
3324 return sprintf(buf, "%u.%u\n", info->version.major_ver,
3325 info->version.minor_ver);
3334 return sprintf(buf, "0x%x\n", info->version.impl_ver);
3343 return sprintf(buf, "%s\n", info->version.vendor_id);
3352 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3367 .name = "arm-scmi",
3392 return -EINVAL;
3439 MODULE_ALIAS("platform:arm-scmi");