/* SPDX-License-Identifier: GPL-2.0 */
/*
 * System Control and Management Interface (SCMI) Message Protocol
 * driver common header file containing some definitions, structures
 * and function prototypes used in all the different SCMI protocols.
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/unaligned.h>

#include "protocols.h"
#include "notify.h"

#define SCMI_MAX_CHANNELS		256

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
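
/*
 * Illustrative sketch only (not part of this header): a caller that has just
 * read back the little-endian status word returned by the platform would
 * typically translate it before propagating it further. "payload" below is a
 * hypothetical pointer to the received message payload, not a field defined
 * here:
 *
 *	s32 status = get_unaligned_le32(payload);
 *	int ret = scmi_to_linux_errno(status);
 *
 * e.g. SCMI_ERR_BUSY (-6) maps to -EBUSY, while any status outside the table
 * falls back to -EIO.
 */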

#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

/*
 * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
 * order to minimize space and collisions, this should equal max_msg, i.e. the
 * maximum number of in-flight messages on a specific platform, but such value
 * is only available at runtime while kernel hashtables are statically sized:
 * pick instead as a fixed static size the maximum number of entries that can
 * fit the whole table into one 4k page.
 */
#define SCMI_PENDING_XFERS_HT_ORDER_SZ	9

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id, sequence id and type.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
	hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}

/*
 * A helper macro to lookup an xfer from the @pending_xfers hashtable
 * using the message sequence number token as a key.
 */
#define XFER_FIND(__ht, __k)					\
({								\
	typeof(__k) k_ = __k;					\
	struct scmi_xfer *xfer_ = NULL;				\
								\
	hash_for_each_possible((__ht), xfer_, node, k_)		\
		if (xfer_->hdr.seq == k_)			\
			break;					\
	xfer_;							\
})
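
/*
 * Illustrative example (values chosen arbitrarily, not taken from any
 * protocol definition in this file): for a command with message id 0x7 on
 * protocol 0x13 carrying sequence token 0x12, pack_scmi_header() yields
 *
 *	FIELD_PREP(MSG_ID_MASK, 0x07)               -> 0x00000007
 *	FIELD_PREP(MSG_TYPE_MASK, MSG_TYPE_COMMAND) -> 0x00000000
 *	FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x13)      -> 0x00004c00
 *	FIELD_PREP(MSG_TOKEN_ID_MASK, 0x12)         -> 0x00480000
 *	packed header                               =  0x00484c07
 *
 * and MSG_XTRACT_TOKEN(0x00484c07) recovers 0x12 on the way back, which is
 * the key used with XFER_FIND() against the @pending_xfers hashtable.
 */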

struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp);

extern const struct bus_type scmi_bus_type;

#define SCMI_BUS_NOTIFY_DEVICE_REQUEST		0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST	1
extern struct blocking_notifier_head scmi_requested_devices_nh;

struct scmi_device *scmi_device_create(struct device_node *np,
				       struct device *parent, int protocol,
				       const char *name);
void scmi_device_destroy(struct device *parent, int protocol, const char *name);

int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);

/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @id: An identifier for this channel: this matches the protocol number
 *      used to initialize this channel
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @rx_timeout_ms: The configured RX timeout in milliseconds.
 * @handle: Pointer to SCMI entity handle
 * @no_completion_irq: Flag to indicate that this channel has no completion
 *		       interrupt mechanism for synchronous commands.
 *		       This can be dynamically set by transports at run-time
 *		       inside their provided .chan_setup().
 * @transport_info: Transport layer related information
 */
struct scmi_chan_info {
	int id;
	struct device *dev;
	unsigned int rx_timeout_ms;
	struct scmi_handle *handle;
	bool no_completion_irq;
	void *transport_info;
};

/**
 * struct scmi_transport_ops - Structure representing a SCMI transport ops
 *
 * @link_supplier: Optional callback to add link to a supplier device
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @get_max_msg: Optional callback to provide max_msg dynamically
 *		 Returns the maximum number of messages for the channel type
 *		 (tx or rx) that can be pending simultaneously in the system
 * @send_message: Callback to send a message
 * @mark_txdone: Callback to mark tx as done
 * @fetch_response: Callback to fetch response
 * @fetch_notification: Callback to fetch notification
 * @clear_channel: Callback to clear a channel
 * @poll_done: Callback to poll transfer status
 */
struct scmi_transport_ops {
	int (*link_supplier)(struct device *dev);
	bool (*chan_available)(struct device_node *of_node, int idx);
	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx);
	int (*chan_free)(int id, void *p, void *data);
	unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
	int (*send_message)(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer);
	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *xfer);
	void (*fetch_response)(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_chan_info *cinfo,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_chan_info *cinfo);
	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
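
/*
 * Illustrative sketch (hypothetical transport, not part of this header): a
 * transport backend would typically provide a static ops table and hang its
 * own per-channel context off cinfo->transport_info, e.g.:
 *
 *	static int foo_chan_setup(struct scmi_chan_info *cinfo,
 *				  struct device *dev, bool tx)
 *	{
 *		struct foo_channel *fchan;
 *
 *		fchan = devm_kzalloc(dev, sizeof(*fchan), GFP_KERNEL);
 *		if (!fchan)
 *			return -ENOMEM;
 *		cinfo->transport_info = fchan;
 *		return 0;
 *	}
 *
 *	static const struct scmi_transport_ops foo_ops = {
 *		.chan_setup = foo_chan_setup,
 *		.send_message = foo_send_message,
 *	};
 *
 * The foo_* names are placeholders for whatever a real transport implements.
 */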

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @transport_init: An optional function that a transport can provide to
 *		    initialize some transport-specific setup during SCMI core
 *		    initialization, so ahead of SCMI core probing.
 * @transport_exit: An optional function that a transport can provide to
 *		    de-initialize some transport-specific setup during SCMI
 *		    core de-initialization, so after SCMI core removal.
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
 *	be pending simultaneously in the system. May be overridden by the
 *	get_max_msg op.
 * @max_msg_size: Maximum size of data per message that can be handled.
 * @force_polling: Flag to force this whole transport to use SCMI core polling
 *		   mechanism instead of completion interrupts even if available.
 * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
 *				synchronous-command messages are atomically
 *				completed on .send_message: no need to poll
 *				actively waiting for a response.
 *				Used by core internally only when polling is
 *				selected as a waiting for reply method: i.e.
 *				if a completion irq was found use that anyway.
 * @atomic_enabled: Flag to indicate that this transport, which is assured not
 *		    to sleep anywhere on the TX path, can be used in atomic mode
 *		    when requested.
 */
struct scmi_desc {
	int (*transport_init)(void);
	void (*transport_exit)(void);
	const struct scmi_transport_ops *ops;
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
	const bool force_polling;
	const bool sync_cmds_completed_on_ret;
	const bool atomic_enabled;
};

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
				       const struct scmi_desc *desc)
{
	return cinfo->no_completion_irq || desc->force_polling;
}

static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
{
	return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
				      const struct scmi_desc *desc)
{
	return is_polling_required(cinfo, desc) &&
		is_transport_polling_capable(desc);
}
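
/*
 * Illustrative sketch (hypothetical values, not describing any real
 * transport): a simple description for a transport whose synchronous
 * commands complete on return could look like
 *
 *	static const struct scmi_desc foo_desc = {
 *		.ops = &foo_ops,
 *		.max_rx_timeout_ms = 30,
 *		.max_msg = 20,
 *		.max_msg_size = 128,
 *		.sync_cmds_completed_on_ret = true,
 *	};
 *
 * On a channel with no completion irq, is_polling_required() is true and,
 * since sync_cmds_completed_on_ret makes the transport polling capable,
 * is_polling_enabled() selects the polling path for synchronous commands.
 */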

void scmi_xfer_raw_put(const struct scmi_handle *handle,
		       struct scmi_xfer *xfer);
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);

int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer);

int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					     struct scmi_xfer *xfer,
					     unsigned int timeout_ms);
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
extern const struct scmi_desc scmi_smc_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
extern const struct scmi_desc scmi_optee_desc;
#endif

void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);

enum scmi_bad_msg {
	MSG_UNEXPECTED = -1,
	MSG_INVALID = -2,
	MSG_UNKNOWN = -3,
	MSG_NOMEM = -4,
	MSG_MBOX_SPURIOUS = -5,
};

void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
			    enum scmi_bad_msg err);

/* shmem related declarations */
struct scmi_shared_mem;

void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
		      struct scmi_xfer *xfer, struct scmi_chan_info *cinfo);
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer);
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
			      size_t max_len, struct scmi_xfer *xfer);
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
		     struct scmi_xfer *xfer);
bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem);

/* declarations for message passing transports */
struct scmi_msg_payld;

/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD	(2 * sizeof(__le32))

size_t msg_response_size(struct scmi_xfer *xfer);
size_t msg_command_size(struct scmi_xfer *xfer);
void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
u32 msg_read_header(struct scmi_msg_payld *msg);
void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
			struct scmi_xfer *xfer);
void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
			    size_t max_len, struct scmi_xfer *xfer);

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
#endif /* _SCMI_COMMON_H */