Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2025 ARM Ltd.
25 #include <linux/io-64-nonatomic-hi-lo.h>
48 #define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
64 * struct scmi_xfers_info - Structure to manage transfer information
72 * a number of xfers equal to the maximum allowed in-flight
75 * currently in-flight messages.
86 * struct scmi_protocol_instance - Describe an initialized protocol instance.
89 * @gid: A reference for per-protocol devres management.
97 * This field is NON-zero when a successful negotiation
119 * struct scmi_info - Structure representing a SCMI instance
125 * implementation version and (sub-)vendor identification.
272 proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id, in scmi_vendor_protocol_get()
273 version->sub_vendor_id, in scmi_vendor_protocol_get()
274 version->impl_ver); in scmi_vendor_protocol_get()
279 protocol_id, version->vendor_id); in scmi_vendor_protocol_get()
283 protocol_id, version->vendor_id); in scmi_vendor_protocol_get()
292 version->vendor_id, in scmi_vendor_protocol_get()
293 version->sub_vendor_id, in scmi_vendor_protocol_get()
294 version->impl_ver); in scmi_vendor_protocol_get()
298 pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", in scmi_vendor_protocol_get()
299 protocol_id, proto->vendor_id ?: "", in scmi_vendor_protocol_get()
300 proto->sub_vendor_id ?: "", proto->impl_ver); in scmi_vendor_protocol_get()
315 if (!proto || !try_module_get(proto->owner)) { in scmi_protocol_get()
328 module_put(proto->owner); in scmi_protocol_put()
333 if (!proto->vendor_id) { in scmi_vendor_protocol_check()
334 pr_err("missing vendor_id for protocol 0x%x\n", proto->id); in scmi_vendor_protocol_check()
335 return -EINVAL; in scmi_vendor_protocol_check()
338 if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { in scmi_vendor_protocol_check()
339 pr_err("malformed vendor_id for protocol 0x%x\n", proto->id); in scmi_vendor_protocol_check()
340 return -EINVAL; in scmi_vendor_protocol_check()
343 if (proto->sub_vendor_id && in scmi_vendor_protocol_check()
344 strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { in scmi_vendor_protocol_check()
346 proto->id); in scmi_vendor_protocol_check()
347 return -EINVAL; in scmi_vendor_protocol_check()
360 return -EINVAL; in scmi_protocol_register()
363 if (!proto->instance_init) { in scmi_protocol_register()
364 pr_err("missing init for protocol 0x%x\n", proto->id); in scmi_protocol_register()
365 return -EINVAL; in scmi_protocol_register()
368 if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE && in scmi_protocol_register()
370 return -EINVAL; in scmi_protocol_register()
376 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, in scmi_protocol_register()
377 proto->sub_vendor_id, in scmi_protocol_register()
378 proto->impl_ver); in scmi_protocol_register()
380 return -EINVAL; in scmi_protocol_register()
384 pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n", in scmi_protocol_register()
385 proto->id, ret); in scmi_protocol_register()
389 pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n", in scmi_protocol_register()
390 proto->id, proto->vendor_id, proto->sub_vendor_id, in scmi_protocol_register()
391 proto->impl_ver); in scmi_protocol_register()
401 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, in scmi_protocol_unregister()
402 proto->sub_vendor_id, in scmi_protocol_unregister()
403 proto->impl_ver); in scmi_protocol_unregister()
409 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_unregister()
414 * scmi_create_protocol_devices - Create devices for all pending requests for
428 mutex_lock(&info->devreq_mtx); in scmi_create_protocol_devices()
429 scmi_device_create(np, info->dev, prot_id, name); in scmi_create_protocol_devices()
430 mutex_unlock(&info->devreq_mtx); in scmi_create_protocol_devices()
436 mutex_lock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
437 scmi_device_destroy(info->dev, prot_id, name); in scmi_destroy_protocol_devices()
438 mutex_unlock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
446 info->notify_priv = priv; in scmi_notification_instance_data_set()
457 return info->notify_priv; in scmi_notification_instance_data_get()
461 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
467 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
468 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
469 * of incorrect association of a late and expired xfer with a live in-flight
470 * transaction, both happening to re-use the same token identifier.
472 * Since the platform is NOT required to answer our requests in order, we should
475 * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
478 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
482 * X = used in-flight
485 * ------
487 * |- xfer_id picked
488 * -----------+----------------------------------------------------------
490 * ----------------------------------------------------------------------
492 * |- next_token
494 * Out-of-order pending at start
495 * -----------------------------
497 * |- xfer_id picked, last_token fixed
498 * -----+----------------------------------------------------------------
500 * ----------------------------------------------------------------------
502 * |- next_token
505 * Out-of-order pending at end
506 * ---------------------------
508 * |- xfer_id picked, last_token fixed
509 * -----+----------------------------------------------------------------
511 * ----------------------------------------------------------------------
513 * |- next_token
525 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] in scmi_xfer_token_set()
526 * using the pre-allocated transfer_id as a base. in scmi_xfer_token_set()
532 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
535 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
539 * After heavily out-of-order responses, there are no free in scmi_xfer_token_set()
543 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
547 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages in scmi_xfer_token_set()
548 * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. in scmi_xfer_token_set()
551 return -ENOMEM; in scmi_xfer_token_set()
554 /* Update +/- last_token accordingly if we skipped some hole */ in scmi_xfer_token_set()
556 atomic_add((int)(xfer_id - next_token), &transfer_last_id); in scmi_xfer_token_set()
558 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
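
The two-pass search above, first over [next_token, MSG_TOKEN_MAX) and then wrapping to [0, next_token), can be shown standalone. A minimal userspace sketch, assuming a plain bitmap; the names token_bitmap, TOKEN_MAX and pick_token() are illustrative, not the kernel's find_next_zero_bit() machinery:

#include <limits.h>
#include <stdbool.h>

#define TOKEN_MAX	1024			/* stand-in for MSG_TOKEN_MAX */
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long token_bitmap[TOKEN_MAX / BITS_PER_LONG];

static bool token_in_flight(unsigned int t)
{
	return token_bitmap[t / BITS_PER_LONG] & (1UL << (t % BITS_PER_LONG));
}

/*
 * Mimic the two find_next_zero_bit() passes: forward from the monotonic
 * hint first, then wrap around to pick up tokens freed out-of-order
 * behind us.
 */
static int pick_token(unsigned int next_token)
{
	unsigned int id;

	for (id = next_token; id < TOKEN_MAX; id++)
		if (!token_in_flight(id))
			return id;
	for (id = 0; id < next_token; id++)
		if (!token_in_flight(id))
			return id;
	return -1;	/* every token in-flight: caller maps this to -ENOMEM */
}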
564 * scmi_xfer_token_clear - Release the token
572 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
576 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
581 * Note that this helper assumes that the xfer to be registered as in-flight
594 /* Set in-flight */ in scmi_xfer_inflight_register_unlocked()
595 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_inflight_register_unlocked()
596 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); in scmi_xfer_inflight_register_unlocked()
597 scmi_inc_count(info->dbg, XFERS_INFLIGHT); in scmi_xfer_inflight_register_unlocked()
599 xfer->pending = true; in scmi_xfer_inflight_register_unlocked()
603 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
611 * same sequence number is currently still registered as in-flight.
613 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
622 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
623 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table)) in scmi_xfer_inflight_register()
626 ret = -EBUSY; in scmi_xfer_inflight_register()
627 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
633 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
646 return scmi_xfer_inflight_register(xfer, &info->tx_minfo); in scmi_xfer_raw_inflight_register()
650 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
651 * as pending in-flight
664 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
669 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
675 * scmi_xfer_get() - Allocate one message
701 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_get()
702 if (hlist_empty(&minfo->free_xfers)) { in scmi_xfer_get()
703 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
704 return ERR_PTR(-ENOMEM); in scmi_xfer_get()
708 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
709 hlist_del_init(&xfer->node); in scmi_xfer_get()
715 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
717 refcount_set(&xfer->users, 1); in scmi_xfer_get()
718 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
719 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
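
The allocation path above is a locked pop from a pre-filled free list plus a monotonic transfer_id. A self-contained sketch using a pthread mutex in place of the kernel spinlock; struct xfer, free_xfers and xfer_get() here are illustrative stand-ins for the driver's types:

#include <pthread.h>
#include <stddef.h>

struct xfer {
	struct xfer *next;
	int transfer_id;
};

static pthread_mutex_t xfer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct xfer *free_xfers;		/* pre-filled at init time */
static int transfer_last_id;		/* monotonic across allocations */

static struct xfer *xfer_get(void)
{
	struct xfer *x;

	pthread_mutex_lock(&xfer_lock);
	x = free_xfers;
	if (!x) {
		pthread_mutex_unlock(&xfer_lock);
		return NULL;		/* caller maps this to -ENOMEM */
	}
	free_xfers = x->next;
	x->next = NULL;
	x->transfer_id = ++transfer_last_id;
	pthread_mutex_unlock(&xfer_lock);
	return x;
}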
725 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
731 * Return: A valid xfer on Success, or an error-pointer otherwise
738 xfer = scmi_xfer_get(handle, &info->tx_minfo); in scmi_xfer_raw_get()
740 xfer->flags |= SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_get()
746 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
754 * protocol in range is allowed, re-using the Base channel, so as to enable
765 cinfo = idr_find(&info->tx_idr, protocol_id); in scmi_xfer_raw_channel_get()
768 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
770 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); in scmi_xfer_raw_channel_get()
772 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
773 dev_warn_once(handle->dev, in scmi_xfer_raw_channel_get()
782 * __scmi_xfer_put() - Release a message
797 spin_lock_irqsave(&minfo->xfer_lock, flags); in __scmi_xfer_put()
798 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
799 if (xfer->pending) { in __scmi_xfer_put()
803 hash_del(&xfer->node); in __scmi_xfer_put()
804 xfer->pending = false; in __scmi_xfer_put()
806 scmi_dec_count(info->dbg, XFERS_INFLIGHT); in __scmi_xfer_put()
808 xfer->flags = 0; in __scmi_xfer_put()
809 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
811 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in __scmi_xfer_put()
815 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
827 return __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_raw_put()
831 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
847 if (test_bit(xfer_id, minfo->xfer_alloc_table)) in scmi_xfer_lookup_unlocked()
848 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
850 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
854 * scmi_bad_message_trace - A helper to trace weird messages
862 * timed-out message that arrives and, as such, can be traced only by referring to
869 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_bad_message_trace()
886 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_bad_message_trace()
893 * scmi_msg_response_validate - Validate message type against state of related
902 * related synchronous response (Out-of-Order Delayed Response) the missing
905 * SCMI transport can deliver such out-of-order responses.
907 * Context: Assumes to be called with xfer->lock already acquired.
917 * a buggy platform could wrongly reply feeding us an unexpected in scmi_msg_response_validate()
918 * delayed response we're not prepared to handle: bail-out safely in scmi_msg_response_validate()
921 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
922 dev_err(cinfo->dev, in scmi_msg_response_validate()
924 xfer->hdr.seq); in scmi_msg_response_validate()
925 return -EINVAL; in scmi_msg_response_validate()
928 switch (xfer->state) { in scmi_msg_response_validate()
935 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
936 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
937 complete(&xfer->done); in scmi_msg_response_validate()
938 dev_warn(cinfo->dev, in scmi_msg_response_validate()
940 xfer->hdr.seq); in scmi_msg_response_validate()
945 return -EINVAL; in scmi_msg_response_validate()
949 return -EINVAL; in scmi_msg_response_validate()
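
The switch above enforces a simple ordering contract between the synchronous response and the optional delayed response. A compact sketch of that contract, assuming three xfer states and two message kinds; the names mirror, but are not, the driver's:

#include <errno.h>

enum xfer_state { SENT_OK, RESP_OK, DRESP_OK };
enum msg_kind { RESP, DELAYED_RESP };

static int response_allowed(enum xfer_state s, enum msg_kind k)
{
	switch (s) {
	case SENT_OK:
		return 0;				/* either kind may land first */
	case RESP_OK:
		return k == DELAYED_RESP ? 0 : -EINVAL;	/* only the DRESP is pending */
	case DRESP_OK:
	default:
		return -EINVAL;				/* nothing else is expected */
	}
}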
956 * scmi_xfer_state_update - Update xfer state
969 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
972 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
973 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
975 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
982 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
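
The atomic_cmpxchg() above implements a try-lock on the xfer: ownership is taken only on a FREE to BUSY transition, so a late interrupt handler and a poller can never both process the same xfer. An equivalent C11 sketch, with XFER_FREE/XFER_BUSY as illustrative values:

#include <stdatomic.h>
#include <stdbool.h>

enum { XFER_FREE = 0, XFER_BUSY = 1 };

/* Returns true only for the single context that flips FREE -> BUSY */
static bool xfer_try_acquire(atomic_int *busy)
{
	int expected = XFER_FREE;

	return atomic_compare_exchange_strong(busy, &expected, XFER_BUSY);
}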
988 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
1005 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_command_acquire()
1006 struct scmi_xfers_info *minfo = &info->tx_minfo; in scmi_xfer_command_acquire()
1011 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
1014 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
1017 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
1020 scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); in scmi_xfer_command_acquire()
1024 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
1025 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
1027 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
1040 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
1043 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
1044 "Invalid message type:%d for %d - HDR:0x%X state:%d\n", in scmi_xfer_command_acquire()
1045 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
1048 scmi_inc_count(info->dbg, ERR_MSG_INVALID); in scmi_xfer_command_acquire()
1052 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
1061 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
1062 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
1068 if (!cinfo->is_p2a) { in scmi_clear_channel()
1069 dev_warn(cinfo->dev, "Invalid clear on A2P channel!\n"); in scmi_clear_channel()
1073 if (info->desc->ops->clear_channel) in scmi_clear_channel()
1074 info->desc->ops->clear_channel(cinfo); in scmi_clear_channel()
1081 struct device *dev = cinfo->dev; in scmi_handle_notification()
1082 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_notification()
1083 struct scmi_xfers_info *minfo = &info->rx_minfo; in scmi_handle_notification()
1087 xfer = scmi_xfer_get(cinfo->handle, minfo); in scmi_handle_notification()
1093 scmi_inc_count(info->dbg, ERR_MSG_NOMEM); in scmi_handle_notification()
1099 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
1101 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_notification()
1102 smp_store_mb(xfer->priv, priv); in scmi_handle_notification()
1103 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, in scmi_handle_notification()
1106 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_notification()
1107 xfer->hdr.id, "NOTI", xfer->hdr.seq, in scmi_handle_notification()
1108 xfer->hdr.status, xfer->rx.buf, xfer->rx.len); in scmi_handle_notification()
1109 scmi_inc_count(info->dbg, NOTIFICATION_OK); in scmi_handle_notification()
1111 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
1112 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
1114 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
1115 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
1119 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_handle_notification()
1120 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE, in scmi_handle_notification()
1121 cinfo->id); in scmi_handle_notification()
1133 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_response()
1138 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv); in scmi_handle_response()
1146 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
1147 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
1150 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_response()
1151 smp_store_mb(xfer->priv, priv); in scmi_handle_response()
1152 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
1154 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_response()
1155 xfer->hdr.id, in scmi_handle_response()
1156 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? in scmi_handle_response()
1159 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_response()
1160 xfer->rx.buf, xfer->rx.len); in scmi_handle_response()
1162 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
1163 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
1164 xfer->hdr.type); in scmi_handle_response()
1166 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
1168 complete(xfer->async_done); in scmi_handle_response()
1169 scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); in scmi_handle_response()
1171 complete(&xfer->done); in scmi_handle_response()
1172 scmi_inc_count(info->dbg, RESPONSE_OK); in scmi_handle_response()
1181 if (!xfer->hdr.poll_completion || in scmi_handle_response()
1182 xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
1183 scmi_raw_message_report(info->raw, xfer, in scmi_handle_response()
1185 cinfo->id); in scmi_handle_response()
1192 * scmi_rx_callback() - callback for receiving messages
1225 * xfer_put() - Release a transmit message
1234 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_put()
1236 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
1243 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_done_no_timeout()
1246 * Poll also on xfer->done so that polling can be forcibly terminated in scmi_xfer_done_no_timeout()
1247 * in case of out-of-order receptions of delayed responses in scmi_xfer_done_no_timeout()
1249 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
1250 (*ooo = try_wait_for_completion(&xfer->done)) || in scmi_xfer_done_no_timeout()
1259 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1261 if (xfer->hdr.poll_completion) { in scmi_wait_for_reply()
1266 if (!desc->sync_cmds_completed_on_ret) { in scmi_wait_for_reply()
1277 if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { in scmi_wait_for_reply()
1279 "timed out in resp(caller: %pS) - polling\n", in scmi_wait_for_reply()
1281 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1282 scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); in scmi_wait_for_reply()
1290 * Do not fetch_response if an out-of-order delayed in scmi_wait_for_reply()
1293 spin_lock_irqsave(&xfer->lock, flags); in scmi_wait_for_reply()
1294 if (xfer->state == SCMI_XFER_SENT_OK) { in scmi_wait_for_reply()
1295 desc->ops->fetch_response(cinfo, xfer); in scmi_wait_for_reply()
1296 xfer->state = SCMI_XFER_RESP_OK; in scmi_wait_for_reply()
1298 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_wait_for_reply()
1301 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_wait_for_reply()
1302 xfer->hdr.protocol_id, xfer->hdr.id, in scmi_wait_for_reply()
1305 xfer->hdr.seq, xfer->hdr.status, in scmi_wait_for_reply()
1306 xfer->rx.buf, xfer->rx.len); in scmi_wait_for_reply()
1307 scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); in scmi_wait_for_reply()
1310 scmi_raw_message_report(info->raw, xfer, in scmi_wait_for_reply()
1312 cinfo->id); in scmi_wait_for_reply()
1317 if (!wait_for_completion_timeout(&xfer->done, in scmi_wait_for_reply()
1321 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1322 scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); in scmi_wait_for_reply()
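
The wait_for_completion_timeout() above is the sleep-waiting leg of the strategy: block until the RX path calls complete() on the xfer or the transport's max_rx_timeout_ms expires. A POSIX approximation of such a completion object; struct completion here is a sketch, not the kernel's:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

/* Returns true if completed, false on timeout (like a 0 return in-kernel) */
static bool wait_completion_timeout(struct completion *c, unsigned int ms)
{
	struct timespec ts;
	bool done;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (long)(ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->done && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	done = c->done;
	pthread_mutex_unlock(&c->lock);

	return done;
}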
1330 * scmi_wait_for_message_response - A helper to group all the possible ways of
1336 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1337 * configuration flags like xfer->hdr.poll_completion.
1344 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_message_response()
1345 struct device *dev = info->dev; in scmi_wait_for_message_response()
1347 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, in scmi_wait_for_message_response()
1348 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_wait_for_message_response()
1349 info->desc->max_rx_timeout_ms, in scmi_wait_for_message_response()
1350 xfer->hdr.poll_completion); in scmi_wait_for_message_response()
1352 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer, in scmi_wait_for_message_response()
1353 info->desc->max_rx_timeout_ms); in scmi_wait_for_message_response()
1357 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1371 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_raw_wait_for_message_response()
1372 struct device *dev = info->dev; in scmi_xfer_raw_wait_for_message_response()
1374 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms); in scmi_xfer_raw_wait_for_message_response()
1376 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n", in scmi_xfer_raw_wait_for_message_response()
1377 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_wait_for_message_response()
1383 * do_xfer() - Do one transfer
1388 * Return: -ETIMEDOUT in case of no response, if transmit error,
1397 struct scmi_info *info = handle_to_scmi_info(pi->handle); in do_xfer()
1398 struct device *dev = info->dev; in do_xfer()
1402 if (xfer->hdr.poll_completion && in do_xfer()
1403 !is_transport_polling_capable(info->desc)) { in do_xfer()
1406 scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); in do_xfer()
1407 return -EINVAL; in do_xfer()
1410 cinfo = idr_find(&info->tx_idr, pi->proto->id); in do_xfer()
1412 scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND); in do_xfer()
1413 return -EINVAL; in do_xfer()
1416 if (is_polling_enabled(cinfo, info->desc)) in do_xfer()
1417 xfer->hdr.poll_completion = true; in do_xfer()
1424 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
1425 reinit_completion(&xfer->done); in do_xfer()
1427 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1428 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
1429 xfer->hdr.poll_completion, in do_xfer()
1430 scmi_inflight_count(&info->handle)); in do_xfer()
1433 xfer->hdr.status = SCMI_SUCCESS; in do_xfer()
1434 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
1437 * on xfer->state due to the monotonically increasing tokens allocation, in do_xfer()
1438 * we must anyway ensure xfer->state initialization is not re-ordered in do_xfer()
1440 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. in do_xfer()
1444 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
1447 scmi_inc_count(info->dbg, SENT_FAIL); in do_xfer()
1451 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in do_xfer()
1452 xfer->hdr.id, "CMND", xfer->hdr.seq, in do_xfer()
1453 xfer->hdr.status, xfer->tx.buf, xfer->tx.len); in do_xfer()
1454 scmi_inc_count(info->dbg, SENT_OK); in do_xfer()
1457 if (!ret && xfer->hdr.status) { in do_xfer()
1458 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
1459 scmi_inc_count(info->dbg, ERR_PROTOCOL); in do_xfer()
1462 if (info->desc->ops->mark_txdone) in do_xfer()
1463 info->desc->ops->mark_txdone(cinfo, ret, xfer); in do_xfer()
1465 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1466 xfer->hdr.protocol_id, xfer->hdr.seq, ret, in do_xfer()
1467 scmi_inflight_count(&info->handle)); in do_xfer()
1476 struct scmi_info *info = handle_to_scmi_info(pi->handle); in reset_rx_to_maxsz()
1478 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
1482 * do_xfer_with_response() - Do one transfer and wait until the delayed
1488 * Using asynchronous commands in atomic/polling mode should be avoided since
1489 * it could cause long busy-waiting here, so ignore polling for the delayed
1494 * command even if made available, when an atomic transport is detected, and
1500 * when using atomic/polling mode)
1502 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1511 xfer->async_done = &async_response; in do_xfer_with_response()
1515 * not have been used when requiring an atomic/poll context; WARN and in do_xfer_with_response()
1519 WARN_ON_ONCE(xfer->hdr.poll_completion); in do_xfer_with_response()
1523 if (!wait_for_completion_timeout(xfer->async_done, timeout)) { in do_xfer_with_response()
1524 dev_err(ph->dev, in do_xfer_with_response()
1527 ret = -ETIMEDOUT; in do_xfer_with_response()
1528 } else if (xfer->hdr.status) { in do_xfer_with_response()
1529 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
1533 xfer->async_done = NULL; in do_xfer_with_response()
1538 * xfer_get_init() - Allocate and initialise one message for transmit
1559 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_get_init()
1560 struct scmi_xfers_info *minfo = &info->tx_minfo; in xfer_get_init()
1561 struct device *dev = info->dev; in xfer_get_init()
1564 if (rx_size > info->desc->max_msg_size || in xfer_get_init()
1565 tx_size > info->desc->max_msg_size) in xfer_get_init()
1566 return -ERANGE; in xfer_get_init()
1568 xfer = scmi_xfer_get(pi->handle, minfo); in xfer_get_init()
1575 /* Pick a sequence number and register this xfer as in-flight */ in xfer_get_init()
1578 dev_err(pi->handle->dev, in xfer_get_init()
1584 xfer->tx.len = tx_size; in xfer_get_init()
1585 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
1586 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
1587 xfer->hdr.id = msg_id; in xfer_get_init()
1588 xfer->hdr.poll_completion = false; in xfer_get_init()
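
Protocol implementations combine xfer_get_init(), do_xfer() and xfer_put() into the canonical request pattern that version_get() below also follows. A hedged, kernel-style example assuming the driver's headers; MY_MSG_ID and the single-u32 reply layout are hypothetical:

static int my_proto_query(const struct scmi_protocol_handle *ph, u32 *out)
{
	struct scmi_xfer *t;
	int ret;

	/* No TX payload, expect a 4-byte response */
	ret = ph->xops->xfer_get_init(ph, MY_MSG_ID, 0, sizeof(*out), &t);
	if (ret)
		return ret;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*out = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}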
1596 * version_get() - command to get the revision of the SCMI entity
1617 rev_info = t->rx.buf; in version_get()
1626 * scmi_set_protocol_priv - Set protocol specific data at init time
1639 pi->priv = priv; in scmi_set_protocol_priv()
1640 pi->version = version; in scmi_set_protocol_priv()
1646 * scmi_get_protocol_priv - Get protocol specific data set at init time
1656 return pi->priv; in scmi_get_protocol_priv()
1674 * scmi_common_extended_name_get - Common helper to get extended resources name
1695 ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t); in scmi_common_extended_name_get()
1699 put_unaligned_le32(res_id, t->tx.buf); in scmi_common_extended_name_get()
1701 put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id)); in scmi_common_extended_name_get()
1702 resp = t->rx.buf; in scmi_common_extended_name_get()
1704 ret = ph->xops->do_xfer(ph, t); in scmi_common_extended_name_get()
1706 strscpy(name, resp->name, len); in scmi_common_extended_name_get()
1708 ph->xops->xfer_put(ph, t); in scmi_common_extended_name_get()
1711 dev_warn(ph->dev, in scmi_common_extended_name_get()
1712 "Failed to get extended name - id:%u (ret:%d). Using %s\n", in scmi_common_extended_name_get()
1718 * scmi_common_get_max_msg_size - Get maximum message size
1726 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_common_get_max_msg_size()
1728 return info->desc->max_msg_size; in scmi_common_get_max_msg_size()
1732 * scmi_protocol_msg_check - Check protocol message attributes
1755 put_unaligned_le32(message_id, t->tx.buf); in scmi_protocol_msg_check()
1758 *attributes = get_unaligned_le32(t->rx.buf); in scmi_protocol_msg_check()
1765 * struct scmi_iterator - Iterator descriptor
1767 * a proper custom command payload for each multi-part command request.
1769 * @process_response to parse the multi-part replies.
1775 * internal routines and by the caller-provided @scmi_iterator_ops.
1797 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); in scmi_iterator_init()
1799 return ERR_PTR(-ENOMEM); in scmi_iterator_init()
1801 i->ph = ph; in scmi_iterator_init()
1802 i->ops = ops; in scmi_iterator_init()
1803 i->priv = priv; in scmi_iterator_init()
1805 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); in scmi_iterator_init()
1807 devm_kfree(ph->dev, i); in scmi_iterator_init()
1811 i->state.max_resources = max_resources; in scmi_iterator_init()
1812 i->msg = i->t->tx.buf; in scmi_iterator_init()
1813 i->resp = i->t->rx.buf; in scmi_iterator_init()
1820 int ret = -EINVAL; in scmi_iterator_run()
1826 if (!i || !i->ops || !i->ph) in scmi_iterator_run()
1829 iops = i->ops; in scmi_iterator_run()
1830 ph = i->ph; in scmi_iterator_run()
1831 st = &i->state; in scmi_iterator_run()
1834 iops->prepare_message(i->msg, st->desc_index, i->priv); in scmi_iterator_run()
1835 ret = ph->xops->do_xfer(ph, i->t); in scmi_iterator_run()
1839 st->rx_len = i->t->rx.len; in scmi_iterator_run()
1840 ret = iops->update_state(st, i->resp, i->priv); in scmi_iterator_run()
1844 if (st->num_returned > st->max_resources - st->desc_index) { in scmi_iterator_run()
1845 dev_err(ph->dev, in scmi_iterator_run()
1847 st->max_resources); in scmi_iterator_run()
1848 ret = -EINVAL; in scmi_iterator_run()
1852 for (st->loop_idx = 0; st->loop_idx < st->num_returned; in scmi_iterator_run()
1853 st->loop_idx++) { in scmi_iterator_run()
1854 ret = iops->process_response(ph, i->resp, st, i->priv); in scmi_iterator_run()
1859 st->desc_index += st->num_returned; in scmi_iterator_run()
1860 ph->xops->reset_rx_to_maxsz(ph, i->t); in scmi_iterator_run()
1865 } while (st->num_returned && st->num_remaining); in scmi_iterator_run()
1869 ph->xops->xfer_put(ph, i->t); in scmi_iterator_run()
1870 devm_kfree(ph->dev, i); in scmi_iterator_run()
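
A user of the iterator supplies the three scmi_iterator_ops callbacks; the core then loops do_xfer() until num_remaining drains. A hedged, kernel-style sketch for a hypothetical multi-part list command; struct my_msg, struct my_resp and the returned/remaining bit packing are assumptions, not a defined SCMI layout:

struct my_msg {
	__le32 domain;
	__le32 desc_index;
};

struct my_resp {
	__le32 num_ret_remaining;	/* returned and remaining, packed */
	__le32 id[];
};

static void my_prepare(void *message, unsigned int desc_index,
		       const void *priv)
{
	struct my_msg *msg = message;

	msg->domain = cpu_to_le32(*(const u32 *)priv);
	msg->desc_index = cpu_to_le32(desc_index);
}

static int my_update(struct scmi_iterator_state *st,
		     const void *response, void *priv)
{
	const struct my_resp *r = response;
	u32 v = le32_to_cpu(r->num_ret_remaining);

	st->num_returned = v & 0xfff;
	st->num_remaining = v >> 16;
	return 0;
}

static int my_process(const struct scmi_protocol_handle *ph,
		      const void *response,
		      struct scmi_iterator_state *st, void *priv)
{
	const struct my_resp *r = response;

	pr_debug("resource[%u] = %u\n", st->desc_index + st->loop_idx,
		 le32_to_cpu(r->id[st->loop_idx]));
	return 0;
}

Such callbacks would then be handed, together with msg_id, tx_size and max_resources, to the iterator helpers exposed through the protocol handle's helper ops, which create and run the loop shown above.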
1898 if (pi->proto->id == SCMI_PROTOCOL_PERF && \
1925 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
1926 "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n", in scmi_common_fastchannel_init()
1927 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
1932 ret = -EINVAL; in scmi_common_fastchannel_init()
1936 ret = ph->xops->xfer_get_init(ph, describe_id, in scmi_common_fastchannel_init()
1941 info = t->tx.buf; in scmi_common_fastchannel_init()
1942 info->domain = cpu_to_le32(domain); in scmi_common_fastchannel_init()
1943 info->message_id = cpu_to_le32(message_id); in scmi_common_fastchannel_init()
1950 ret = ph->xops->do_xfer(ph, t); in scmi_common_fastchannel_init()
1954 resp = t->rx.buf; in scmi_common_fastchannel_init()
1955 flags = le32_to_cpu(resp->attr); in scmi_common_fastchannel_init()
1956 size = le32_to_cpu(resp->chan_size); in scmi_common_fastchannel_init()
1958 ret = -EINVAL; in scmi_common_fastchannel_init()
1963 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0); in scmi_common_fastchannel_init()
1965 phys_addr = le32_to_cpu(resp->chan_addr_low); in scmi_common_fastchannel_init()
1966 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; in scmi_common_fastchannel_init()
1967 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1969 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1976 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); in scmi_common_fastchannel_init()
1978 ret = -ENOMEM; in scmi_common_fastchannel_init()
1983 phys_addr = le32_to_cpu(resp->db_addr_low); in scmi_common_fastchannel_init()
1984 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; in scmi_common_fastchannel_init()
1985 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1987 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1991 db->addr = addr; in scmi_common_fastchannel_init()
1992 db->width = size; in scmi_common_fastchannel_init()
1993 db->set = le32_to_cpu(resp->db_set_lmask); in scmi_common_fastchannel_init()
1994 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; in scmi_common_fastchannel_init()
1995 db->mask = le32_to_cpu(resp->db_preserve_lmask); in scmi_common_fastchannel_init()
1996 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; in scmi_common_fastchannel_init()
2001 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
2003 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
2005 pi->proto->id, message_id, domain); in scmi_common_fastchannel_init()
2010 devm_kfree(ph->dev, db); in scmi_common_fastchannel_init()
2016 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
2019 dev_warn(ph->dev, in scmi_common_fastchannel_init()
2020 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", in scmi_common_fastchannel_init()
2021 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
2028 if (db->mask) \
2029 val = ioread##w(db->addr) & db->mask; \
2030 iowrite##w((u##w)db->set | val, db->addr); \
2035 if (!db || !db->addr) in scmi_common_fastchannel_db_ring()
2038 if (db->width == 1) in scmi_common_fastchannel_db_ring()
2040 else if (db->width == 2) in scmi_common_fastchannel_db_ring()
2042 else if (db->width == 4) in scmi_common_fastchannel_db_ring()
2044 else /* db->width == 8 */ in scmi_common_fastchannel_db_ring()
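
The SCMI_PROTO_FC_RING_DB() macro expands to one read-modify-write per doorbell width: bits covered by db->mask (the preserve mask) are carried over from the current register value, everything else is overwritten with db->set. A standalone 32-bit sketch of that write:

#include <stdint.h>

static inline void ring_db32(volatile uint32_t *addr, uint32_t set_mask,
			     uint32_t preserve_mask)
{
	uint32_t val = 0;

	if (preserve_mask)
		val = *addr & preserve_mask;	/* carry over preserved bits */
	*addr = set_mask | val;			/* ring the doorbell */
}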
2059 * scmi_revision_area_get - Retrieve version memory area.
2074 return pi->handle->version; in scmi_revision_area_get()
2078 * scmi_protocol_version_negotiate - Negotiate protocol version
2105 put_unaligned_le32(pi->proto->supported_version, t->tx.buf); in scmi_protocol_version_negotiate()
2108 pi->negotiated_version = pi->proto->supported_version; in scmi_protocol_version_negotiate()
2116 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2123 * all resources management is handled via a dedicated per-protocol devres
2135 int ret = -ENOMEM; in scmi_alloc_init_protocol_instance()
2138 const struct scmi_handle *handle = &info->handle; in scmi_alloc_init_protocol_instance()
2141 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_alloc_init_protocol_instance()
2147 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); in scmi_alloc_init_protocol_instance()
2151 pi->gid = gid; in scmi_alloc_init_protocol_instance()
2152 pi->proto = proto; in scmi_alloc_init_protocol_instance()
2153 pi->handle = handle; in scmi_alloc_init_protocol_instance()
2154 pi->ph.dev = handle->dev; in scmi_alloc_init_protocol_instance()
2155 pi->ph.xops = &xfer_ops; in scmi_alloc_init_protocol_instance()
2156 pi->ph.hops = &helpers_ops; in scmi_alloc_init_protocol_instance()
2157 pi->ph.set_priv = scmi_set_protocol_priv; in scmi_alloc_init_protocol_instance()
2158 pi->ph.get_priv = scmi_get_protocol_priv; in scmi_alloc_init_protocol_instance()
2159 refcount_set(&pi->users, 1); in scmi_alloc_init_protocol_instance()
2160 /* proto->init is assured NON NULL by scmi_protocol_register */ in scmi_alloc_init_protocol_instance()
2161 ret = pi->proto->instance_init(&pi->ph); in scmi_alloc_init_protocol_instance()
2165 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, in scmi_alloc_init_protocol_instance()
2167 if (ret != proto->id) in scmi_alloc_init_protocol_instance()
2174 if (pi->proto->events) { in scmi_alloc_init_protocol_instance()
2175 ret = scmi_register_protocol_events(handle, pi->proto->id, in scmi_alloc_init_protocol_instance()
2176 &pi->ph, in scmi_alloc_init_protocol_instance()
2177 pi->proto->events); in scmi_alloc_init_protocol_instance()
2179 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2180 "Protocol:%X - Events Registration Failed - err:%d\n", in scmi_alloc_init_protocol_instance()
2181 pi->proto->id, ret); in scmi_alloc_init_protocol_instance()
2184 devres_close_group(handle->dev, pi->gid); in scmi_alloc_init_protocol_instance()
2185 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); in scmi_alloc_init_protocol_instance()
2187 if (pi->version > proto->supported_version) { in scmi_alloc_init_protocol_instance()
2188 ret = scmi_protocol_version_negotiate(&pi->ph); in scmi_alloc_init_protocol_instance()
2190 dev_info(handle->dev, in scmi_alloc_init_protocol_instance()
2192 proto->id, pi->negotiated_version); in scmi_alloc_init_protocol_instance()
2194 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2196 pi->version, pi->proto->id); in scmi_alloc_init_protocol_instance()
2197 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2199 pi->proto->supported_version); in scmi_alloc_init_protocol_instance()
2208 devres_release_group(handle->dev, gid); in scmi_alloc_init_protocol_instance()
2214 * scmi_get_protocol_instance - Protocol initialization helper.
2220 * resource allocation with a dedicated per-protocol devres subgroup.
2223 * in particular returns -EPROBE_DEFER when the desired protocol could
2232 mutex_lock(&info->protocols_mtx); in scmi_get_protocol_instance()
2233 pi = idr_find(&info->protocols, protocol_id); in scmi_get_protocol_instance()
2236 refcount_inc(&pi->users); in scmi_get_protocol_instance()
2241 proto = scmi_protocol_get(protocol_id, &info->version); in scmi_get_protocol_instance()
2245 pi = ERR_PTR(-EPROBE_DEFER); in scmi_get_protocol_instance()
2247 mutex_unlock(&info->protocols_mtx); in scmi_get_protocol_instance()
2253 * scmi_protocol_acquire - Protocol acquire
2268 * scmi_protocol_release - Protocol de-initialization helper.
2272 * Remove one user for the specified protocol and trigger de-initialization
2273 * and resource de-allocation once the last user has gone.
2280 mutex_lock(&info->protocols_mtx); in scmi_protocol_release()
2281 pi = idr_find(&info->protocols, protocol_id); in scmi_protocol_release()
2285 if (refcount_dec_and_test(&pi->users)) { in scmi_protocol_release()
2286 void *gid = pi->gid; in scmi_protocol_release()
2288 if (pi->proto->events) in scmi_protocol_release()
2291 if (pi->proto->instance_deinit) in scmi_protocol_release()
2292 pi->proto->instance_deinit(&pi->ph); in scmi_protocol_release()
2294 idr_remove(&info->protocols, protocol_id); in scmi_protocol_release()
2296 scmi_protocol_put(pi->proto); in scmi_protocol_release()
2298 devres_release_group(handle->dev, gid); in scmi_protocol_release()
2299 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", in scmi_protocol_release()
2304 mutex_unlock(&info->protocols_mtx); in scmi_protocol_release()
2311 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_setup_protocol_implemented()
2313 info->protocols_imp = prot_imp; in scmi_setup_protocol_implemented()
2321 struct scmi_revision_info *rev = handle->version; in scmi_is_protocol_implemented()
2323 if (!info->protocols_imp) in scmi_is_protocol_implemented()
2326 for (i = 0; i < rev->num_protocols; i++) in scmi_is_protocol_implemented()
2327 if (info->protocols_imp[i] == prot_id) in scmi_is_protocol_implemented()
2341 scmi_protocol_release(dres->handle, dres->protocol_id); in scmi_devm_release_protocol()
2353 return ERR_PTR(-ENOMEM); in scmi_devres_protocol_instance_get()
2355 pi = scmi_get_protocol_instance(sdev->handle, protocol_id); in scmi_devres_protocol_instance_get()
2361 dres->handle = sdev->handle; in scmi_devres_protocol_instance_get()
2362 dres->protocol_id = protocol_id; in scmi_devres_protocol_instance_get()
2363 devres_add(&sdev->dev, dres); in scmi_devres_protocol_instance_get()
2369 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2380 * released, and possibly de-initialized on last user, once the SCMI driver
2393 return ERR_PTR(-EINVAL); in scmi_devm_protocol_get()
2399 *ph = &pi->ph; in scmi_devm_protocol_get()
2401 return pi->proto->ops; in scmi_devm_protocol_get()
2405 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2415 * released, and possibly de-initialized on last user, once the SCMI driver
2439 return dres->protocol_id == *((u8 *)data); in scmi_devm_protocol_match()
2443 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2455 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, in scmi_devm_protocol_put()
2461 * scmi_is_transport_atomic - Method to check if underlying transport for an
2462 * SCMI instance is configured as atomic.
2466 * configured threshold for atomic operations.
2468 * Return: True if transport is configured as atomic
2476 ret = info->desc->atomic_enabled && in scmi_is_transport_atomic()
2477 is_transport_polling_capable(info->desc); in scmi_is_transport_atomic()
2479 *atomic_threshold = info->desc->atomic_threshold; in scmi_is_transport_atomic()
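
A hedged example of how an SCMI user driver consumes is_transport_atomic(): take a non-sleeping fast path only when the transport completes commands atomically and its configured threshold covers the expected command latency. FAST_SWITCH_US and can_switch_atomically() are hypothetical:

#define FAST_SWITCH_US	20	/* expected worst-case command latency */

static bool can_switch_atomically(const struct scmi_handle *handle)
{
	unsigned int atomic_threshold_us = 0;

	if (!handle->is_transport_atomic(handle, &atomic_threshold_us))
		return false;

	/* Fast path only if the platform's threshold covers our latency */
	return atomic_threshold_us >= FAST_SWITCH_US;
}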
2485 * scmi_handle_get() - Get the SCMI handle for a device
2504 if (dev->parent == info->dev) { in scmi_handle_get()
2505 info->users++; in scmi_handle_get()
2506 handle = &info->handle; in scmi_handle_get()
2516 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2525 * if NULL was passed, it returns -EINVAL;
2532 return -EINVAL; in scmi_handle_put()
2536 if (!WARN_ON(!info->users)) in scmi_handle_put()
2537 info->users--; in scmi_handle_put()
2555 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); in scmi_set_handle()
2556 if (scmi_dev->handle) in scmi_set_handle()
2557 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); in scmi_set_handle()
2565 struct device *dev = sinfo->dev; in __scmi_xfer_info_init()
2566 const struct scmi_desc *desc = sinfo->desc; in __scmi_xfer_info_init()
2568 /* Pre-allocated messages, no more than what hdr.seq can support */ in __scmi_xfer_info_init()
2569 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { in __scmi_xfer_info_init()
2571 "Invalid maximum messages %d, not in range [1 - %lu]\n", in __scmi_xfer_info_init()
2572 info->max_msg, MSG_TOKEN_MAX); in __scmi_xfer_info_init()
2573 return -EINVAL; in __scmi_xfer_info_init()
2576 hash_init(info->pending_xfers); in __scmi_xfer_info_init()
2579 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX, in __scmi_xfer_info_init()
2581 if (!info->xfer_alloc_table) in __scmi_xfer_info_init()
2582 return -ENOMEM; in __scmi_xfer_info_init()
2586 * pre-initialize the buffer pointer to pre-allocated buffers and in __scmi_xfer_info_init()
2589 INIT_HLIST_HEAD(&info->free_xfers); in __scmi_xfer_info_init()
2590 for (i = 0; i < info->max_msg; i++) { in __scmi_xfer_info_init()
2593 return -ENOMEM; in __scmi_xfer_info_init()
2595 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
2597 if (!xfer->rx.buf) in __scmi_xfer_info_init()
2598 return -ENOMEM; in __scmi_xfer_info_init()
2600 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
2601 init_completion(&xfer->done); in __scmi_xfer_info_init()
2602 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
2605 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
2608 spin_lock_init(&info->xfer_lock); in __scmi_xfer_info_init()
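
The init path above pre-allocates every xfer and its single message buffer up front (TX and RX share one buffer, since a command and its response never overlap on the same xfer), then parks them on the free list that the allocation path pops from. A userspace sketch of that pooling, with hypothetical names and sizes:

#include <stdlib.h>

#define MAX_MSG		32	/* stand-in for desc->max_msg */
#define MAX_MSG_SIZE	128	/* stand-in for desc->max_msg_size */

struct xfer_slot {
	struct xfer_slot *next;
	void *buf;		/* rx.buf == tx.buf: one buffer per xfer */
};

static struct xfer_slot *free_slots;

static int xfer_pool_init(void)
{
	int i;

	for (i = 0; i < MAX_MSG; i++) {
		struct xfer_slot *x = calloc(1, sizeof(*x));

		if (!x)
			return -1;
		x->buf = calloc(1, MAX_MSG_SIZE);
		if (!x->buf) {
			free(x);
			return -1;
		}
		x->next = free_slots;	/* push onto the free list */
		free_slots = x;
	}
	return 0;
}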
2615 const struct scmi_desc *desc = sinfo->desc; in scmi_channels_max_msg_configure()
2617 if (!desc->ops->get_max_msg) { in scmi_channels_max_msg_configure()
2618 sinfo->tx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2619 sinfo->rx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2623 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2625 return -EINVAL; in scmi_channels_max_msg_configure()
2626 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2629 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2631 sinfo->rx_minfo.max_msg = in scmi_channels_max_msg_configure()
2632 desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2646 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); in scmi_xfer_info_init()
2647 if (!ret && !idr_is_empty(&sinfo->rx_idr)) in scmi_xfer_info_init()
2648 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); in scmi_xfer_info_init()
2664 idr = tx ? &info->tx_idr : &info->rx_idr; in scmi_chan_setup()
2666 if (!info->desc->ops->chan_available(of_node, idx)) { in scmi_chan_setup()
2669 return -EINVAL; in scmi_chan_setup()
2673 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_chan_setup()
2675 return -ENOMEM; in scmi_chan_setup()
2677 cinfo->is_p2a = !tx; in scmi_chan_setup()
2678 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; in scmi_chan_setup()
2679 cinfo->max_msg_size = info->desc->max_msg_size; in scmi_chan_setup()
2685 tdev = scmi_device_create(of_node, info->dev, prot_id, name); in scmi_chan_setup()
2687 dev_err(info->dev, in scmi_chan_setup()
2689 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2690 return -EINVAL; in scmi_chan_setup()
2694 cinfo->id = prot_id; in scmi_chan_setup()
2695 cinfo->dev = &tdev->dev; in scmi_chan_setup()
2696 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); in scmi_chan_setup()
2699 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2700 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2704 if (tx && is_polling_required(cinfo, info->desc)) { in scmi_chan_setup()
2705 if (is_transport_polling_capable(info->desc)) in scmi_chan_setup()
2706 dev_info(&tdev->dev, in scmi_chan_setup()
2707 "Enabled polling mode TX channel - prot_id:%d\n", in scmi_chan_setup()
2710 dev_warn(&tdev->dev, in scmi_chan_setup()
2717 dev_err(info->dev, in scmi_chan_setup()
2722 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2723 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2728 cinfo->handle = &info->handle; in scmi_chan_setup()
2741 if (ret && ret != -ENOMEM) in scmi_txrx_setup()
2746 dev_err(info->dev, in scmi_txrx_setup()
2753 * scmi_channels_setup - Helper to initialize all required channels
2773 struct device_node *top_np = info->dev->of_node; in scmi_channels_setup()
2787 dev_err(info->dev, in scmi_channels_setup()
2802 if (cinfo->dev) { in scmi_chan_destroy()
2803 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_chan_destroy()
2804 struct scmi_device *sdev = to_scmi_dev(cinfo->dev); in scmi_chan_destroy()
2806 of_node_put(cinfo->dev->of_node); in scmi_chan_destroy()
2807 scmi_device_destroy(info->dev, id, sdev->name); in scmi_chan_destroy()
2808 cinfo->dev = NULL; in scmi_chan_destroy()
2819 idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_channels()
2829 scmi_cleanup_channels(info, &info->tx_idr); in scmi_cleanup_txrx_channels()
2831 scmi_cleanup_channels(info, &info->rx_idr); in scmi_cleanup_txrx_channels()
2841 if (sdev->dev.parent != info->dev) in scmi_bus_notifier()
2850 scmi_handle_put(sdev->handle); in scmi_bus_notifier()
2851 sdev->handle = NULL; in scmi_bus_notifier()
2857 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev), in scmi_bus_notifier()
2858 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ? in scmi_bus_notifier()
2871 np = idr_find(&info->active_protocols, id_table->protocol_id); in scmi_device_request_notifier()
2875 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n", in scmi_device_request_notifier()
2876 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-", in scmi_device_request_notifier()
2877 id_table->name, id_table->protocol_id); in scmi_device_request_notifier()
2881 scmi_create_protocol_devices(np, info, id_table->protocol_id, in scmi_device_request_notifier()
2882 id_table->name); in scmi_device_request_notifier()
2885 scmi_destroy_protocol_devices(info, id_table->protocol_id, in scmi_device_request_notifier()
2886 id_table->name); in scmi_device_request_notifier()
2916 struct scmi_debug_info *dbg = filp->private_data; in reset_all_on_write()
2919 atomic_set(&dbg->counters[i], 0); in reset_all_on_write()
2940 &dbg->counters[idx]); in scmi_debugfs_counters_setup()
2952 debugfs_remove_recursive(dbg->top_dentry); in scmi_debugfs_common_cleanup()
2953 kfree(dbg->name); in scmi_debugfs_common_cleanup()
2954 kfree(dbg->type); in scmi_debugfs_common_cleanup()
2964 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL); in scmi_debugfs_common_setup()
2968 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL); in scmi_debugfs_common_setup()
2969 if (!dbg->name) { in scmi_debugfs_common_setup()
2970 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2974 of_property_read_string(info->dev->of_node, "compatible", &c_ptr); in scmi_debugfs_common_setup()
2975 dbg->type = kstrdup(c_ptr, GFP_KERNEL); in scmi_debugfs_common_setup()
2976 if (!dbg->type) { in scmi_debugfs_common_setup()
2977 kfree(dbg->name); in scmi_debugfs_common_setup()
2978 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2982 snprintf(top_dir, 16, "%d", info->id); in scmi_debugfs_common_setup()
2986 dbg->is_atomic = info->desc->atomic_enabled && in scmi_debugfs_common_setup()
2987 is_transport_polling_capable(info->desc); in scmi_debugfs_common_setup()
2990 (char **)&dbg->name); in scmi_debugfs_common_setup()
2993 (u32 *)&info->desc->atomic_threshold); in scmi_debugfs_common_setup()
2995 debugfs_create_str("type", 0400, trans, (char **)&dbg->type); in scmi_debugfs_common_setup()
2997 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic); in scmi_debugfs_common_setup()
3000 (u32 *)&info->desc->max_rx_timeout_ms); in scmi_debugfs_common_setup()
3003 (u32 *)&info->desc->max_msg_size); in scmi_debugfs_common_setup()
3006 (u32 *)&info->tx_minfo.max_msg); in scmi_debugfs_common_setup()
3009 (u32 *)&info->rx_minfo.max_msg); in scmi_debugfs_common_setup()
3014 dbg->top_dentry = top_dentry; in scmi_debugfs_common_setup()
3016 if (devm_add_action_or_reset(info->dev, in scmi_debugfs_common_setup()
3031 idr_for_each_entry(&info->tx_idr, cinfo, id) { in scmi_debugfs_raw_mode_setup()
3037 dev_warn(info->dev, in scmi_debugfs_raw_mode_setup()
3038 "SCMI RAW - Error enumerating channels\n"); in scmi_debugfs_raw_mode_setup()
3042 if (!test_bit(cinfo->id, protos)) { in scmi_debugfs_raw_mode_setup()
3043 channels[num_chans++] = cinfo->id; in scmi_debugfs_raw_mode_setup()
3044 set_bit(cinfo->id, protos); in scmi_debugfs_raw_mode_setup()
3048 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry, in scmi_debugfs_raw_mode_setup()
3049 info->id, channels, num_chans, in scmi_debugfs_raw_mode_setup()
3050 info->desc, info->tx_minfo.max_msg); in scmi_debugfs_raw_mode_setup()
3051 if (IS_ERR(info->raw)) { in scmi_debugfs_raw_mode_setup()
3052 dev_err(info->dev, "Failed to initialize SCMI RAW Mode!\n"); in scmi_debugfs_raw_mode_setup()
3053 ret = PTR_ERR(info->raw); in scmi_debugfs_raw_mode_setup()
3054 info->raw = NULL; in scmi_debugfs_raw_mode_setup()
3066 if (!trans || !trans->supplier || !trans->core_ops) in scmi_transport_setup()
3069 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) { in scmi_transport_setup()
3076 *trans->core_ops = &scmi_trans_core_ops; in scmi_transport_setup()
3078 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); in scmi_transport_setup()
3080 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms", in scmi_transport_setup()
3081 &trans->desc.max_rx_timeout_ms); in scmi_transport_setup()
3082 if (ret && ret != -EINVAL) in scmi_transport_setup()
3083 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); in scmi_transport_setup()
3085 ret = of_property_read_u32(dev->of_node, "arm,max-msg-size", in scmi_transport_setup()
3086 &trans->desc.max_msg_size); in scmi_transport_setup()
3087 if (ret && ret != -EINVAL) in scmi_transport_setup()
3088 dev_err(dev, "Malformed arm,max-msg-size DT property.\n"); in scmi_transport_setup()
3090 ret = of_property_read_u32(dev->of_node, "arm,max-msg", in scmi_transport_setup()
3091 &trans->desc.max_msg); in scmi_transport_setup()
3092 if (ret && ret != -EINVAL) in scmi_transport_setup()
3093 dev_err(dev, "Malformed arm,max-msg DT property.\n"); in scmi_transport_setup()
3096 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n", in scmi_transport_setup()
3097 trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size, in scmi_transport_setup()
3098 trans->desc.max_msg); in scmi_transport_setup()
3100 /* System wide atomic threshold for atomic ops .. if any */ in scmi_transport_setup()
3101 if (!of_property_read_u32(dev->of_node, "atomic-threshold-us", in scmi_transport_setup()
3102 &trans->desc.atomic_threshold)) in scmi_transport_setup()
3104 "SCMI System wide atomic threshold set to %u us\n", in scmi_transport_setup()
3105 trans->desc.atomic_threshold); in scmi_transport_setup()
3107 return &trans->desc; in scmi_transport_setup()
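
The optional "atomic-threshold-us" property parsed above is how a platform advertises, in microseconds, how long a command may take while still being reasonable to issue from atomic context; the value later surfaces through scmi_is_transport_atomic(). A hedged, kernel-style sketch of the parse with the "property absent" default made explicit; scmi_parse_atomic_threshold() is a hypothetical helper and the DT shape in the comment is illustrative:

/*
 * In the DT source the property sits under the SCMI node, e.g.:
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			atomic-threshold-us = <30>;
 *			...
 *		};
 *	};
 */
static void scmi_parse_atomic_threshold(struct device *dev,
					struct scmi_desc *desc)
{
	/* of_property_read_u32() leaves the value untouched on error */
	if (of_property_read_u32(dev->of_node, "atomic-threshold-us",
				 &desc->atomic_threshold))
		desc->atomic_threshold = 0;
}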
3112 struct scmi_revision_info *rev = &info->version; in scmi_enable_matching_quirks()
3114 dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n", in scmi_enable_matching_quirks()
3115 rev->vendor_id, rev->sub_vendor_id, rev->impl_ver); in scmi_enable_matching_quirks()
3118 scmi_quirks_enable(info->dev, rev->vendor_id, in scmi_enable_matching_quirks()
3119 rev->sub_vendor_id, rev->impl_ver); in scmi_enable_matching_quirks()
3130 struct device *dev = &pdev->dev; in scmi_probe()
3131 struct device_node *child, *np = dev->of_node; in scmi_probe()
3136 ret = -EINVAL; in scmi_probe()
3142 return -ENOMEM; in scmi_probe()
3144 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL); in scmi_probe()
3145 if (info->id < 0) in scmi_probe()
3146 return info->id; in scmi_probe()
3148 info->dev = dev; in scmi_probe()
3149 info->desc = desc; in scmi_probe()
3150 info->bus_nb.notifier_call = scmi_bus_notifier; in scmi_probe()
3151 info->dev_req_nb.notifier_call = scmi_device_request_notifier; in scmi_probe()
3152 INIT_LIST_HEAD(&info->node); in scmi_probe()
3153 idr_init(&info->protocols); in scmi_probe()
3154 mutex_init(&info->protocols_mtx); in scmi_probe()
3155 idr_init(&info->active_protocols); in scmi_probe()
3156 mutex_init(&info->devreq_mtx); in scmi_probe()
3159 idr_init(&info->tx_idr); in scmi_probe()
3160 idr_init(&info->rx_idr); in scmi_probe()
3162 handle = &info->handle; in scmi_probe()
3163 handle->dev = info->dev; in scmi_probe()
3164 handle->version = &info->version; in scmi_probe()
3165 handle->devm_protocol_acquire = scmi_devm_protocol_acquire; in scmi_probe()
3166 handle->devm_protocol_get = scmi_devm_protocol_get; in scmi_probe()
3167 handle->devm_protocol_put = scmi_devm_protocol_put; in scmi_probe()
3168 handle->is_transport_atomic = scmi_is_transport_atomic; in scmi_probe()
3177 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
3184 &info->dev_req_nb); in scmi_probe()
3197 info->dbg = scmi_debugfs_common_setup(info); in scmi_probe()
3198 if (!info->dbg) in scmi_probe()
3201 if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { in scmi_probe()
3219 if (info->desc->atomic_enabled && in scmi_probe()
3220 !is_transport_polling_capable(info->desc)) in scmi_probe()
3222 "Transport is not polling capable. Atomic mode not supported.\n"); in scmi_probe()
3240 list_add_tail(&info->node, &scmi_list); in scmi_probe()
3264 ret = idr_alloc(&info->active_protocols, child, in scmi_probe()
3280 scmi_raw_mode_cleanup(info->raw); in scmi_probe()
3281 scmi_notification_exit(&info->handle); in scmi_probe()
3284 &info->dev_req_nb); in scmi_probe()
3286 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
3290 ida_free(&scmi_id, info->id); in scmi_probe()
3303 scmi_raw_mode_cleanup(info->raw); in scmi_remove()
3306 if (info->users) in scmi_remove()
3307 dev_warn(&pdev->dev, in scmi_remove()
3309 list_del(&info->node); in scmi_remove()
3312 scmi_notification_exit(&info->handle); in scmi_remove()
3314 mutex_lock(&info->protocols_mtx); in scmi_remove()
3315 idr_destroy(&info->protocols); in scmi_remove()
3316 mutex_unlock(&info->protocols_mtx); in scmi_remove()
3318 idr_for_each_entry(&info->active_protocols, child, id) in scmi_remove()
3320 idr_destroy(&info->active_protocols); in scmi_remove()
3323 &info->dev_req_nb); in scmi_remove()
3324 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_remove()
3329 ida_free(&scmi_id, info->id); in scmi_remove()
3337 return sprintf(buf, "%u.%u\n", info->version.major_ver, in protocol_version_show()
3338 info->version.minor_ver); in protocol_version_show()
3347 return sprintf(buf, "0x%x\n", info->version.impl_ver); in firmware_version_show()
3356 return sprintf(buf, "%s\n", info->version.vendor_id); in vendor_id_show()
3365 return sprintf(buf, "%s\n", info->version.sub_vendor_id); in sub_vendor_id_show()
3380 .name = "arm-scmi",
3406 if (!info->dbg) in scmi_inflight_count()
3409 return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]); in scmi_inflight_count()
3421 return -EINVAL; in scmi_driver_init()
3468 MODULE_ALIAS("platform:arm-scmi");