/* SPDX-License-Identifier: GPL-2.0 */
/*
 * System Control and Management Interface (SCMI) Message Protocol
 * driver common header file containing some definitions, structures
 * and function prototypes used in all the different SCMI protocols.
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <linux/unaligned.h>

#include "protocols.h"
#include "notify.h"

#define SCMI_MAX_CHANNELS		256

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

static const int scmi_linux_errmap[] = {
	/* better than a switch case as long as the error codes are contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
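
/*
 * A minimal illustration of the mapping above (values read off the table,
 * not taken from any particular transport): a firmware status of -2
 * (SCMI_ERR_PARAMS) becomes -EINVAL, while any status outside the table
 * falls back to -EIO.
 *
 *	int ret;
 *
 *	ret = scmi_to_linux_errno(SCMI_ERR_PARAMS);	// -EINVAL
 *	ret = scmi_to_linux_errno(-11);			// unmapped -> -EIO
 */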
71
72 #define MSG_ID_MASK GENMASK(7, 0)
73 #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
74 #define MSG_TYPE_MASK GENMASK(9, 8)
75 #define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
76 #define MSG_TYPE_COMMAND 0
77 #define MSG_TYPE_DELAYED_RESP 2
78 #define MSG_TYPE_NOTIFICATION 3
79 #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
80 #define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
81 #define MSG_TOKEN_ID_MASK GENMASK(27, 18)
82 #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
83 #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
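
/*
 * For reference, the 32-bit message header layout implied by the masks
 * above (bits 31:28 are not set by this driver):
 *
 *	| 31..28 | 27......18 | 17......10 | 9..8 | 7..0 |
 *	|  resvd |   token    |  protocol  | type |  id  |
 */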

/*
 * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
 * order to minimize space and collisions, this should equal max_msg, i.e. the
 * maximum number of in-flight messages on a specific platform, but such value
 * is only available at runtime while kernel hashtables are statically sized:
 * pick instead as a fixed static size the maximum number of entries that can
 * fit the whole table into one 4k page.
 */
#define SCMI_PENDING_XFERS_HT_ORDER_SZ		9
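
/*
 * The arithmetic behind the chosen order, assuming a 64-bit build where
 * struct hlist_head is a single 8-byte pointer:
 *
 *	buckets    = 1 << SCMI_PENDING_XFERS_HT_ORDER_SZ = 512
 *	table size = 512 * sizeof(struct hlist_head) = 512 * 8 = 4096 bytes
 *
 * i.e. exactly one 4k page, as the comment above intends.
 */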

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	 protocol id, sequence id and type.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
		FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
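
/*
 * A worked example with hypothetical field values: id = 0x7,
 * type = MSG_TYPE_COMMAND (0), seq = 1 and protocol_id = 0x14 yield
 *
 *	(0x7 << 0) | (0 << 8) | (0x14 << 10) | (1 << 18) = 0x00045007
 *
 * which unpack_scmi_header() below splits back into the same fields.
 */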

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	hdr->id = MSG_XTRACT_ID(msg_hdr);
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
	hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}

/*
 * A helper macro to lookup an xfer from the @pending_xfers hashtable
 * using the message sequence number token as a key.
 */
#define XFER_FIND(__ht, __k)					\
({								\
	typeof(__k) k_ = __k;					\
	struct scmi_xfer *xfer_ = NULL;				\
								\
	hash_for_each_possible((__ht), xfer_, node, k_)		\
		if (xfer_->hdr.seq == k_)			\
			break;					\
	xfer_;							\
})
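
/*
 * Usage sketch (variable names hypothetical): given a hashtable of
 * in-flight transfers, look up the xfer matching the token of an incoming
 * header; a NULL result means no transfer with that token is pending.
 *
 *	struct scmi_xfer *xfer;
 *
 *	xfer = XFER_FIND(pending_xfers, MSG_XTRACT_TOKEN(msg_hdr));
 *	if (!xfer)
 *		// no pending transfer for this token
 */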

struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp);

extern const struct bus_type scmi_bus_type;

#define SCMI_BUS_NOTIFY_DEVICE_REQUEST		0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST	1
extern struct blocking_notifier_head scmi_requested_devices_nh;

struct scmi_device *scmi_device_create(struct device_node *np,
				       struct device *parent, int protocol,
				       const char *name);
void scmi_device_destroy(struct device *parent, int protocol, const char *name);

int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);

/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing SCMI channel information
 *
 * @id: An identifier for this channel: this matches the protocol number
 *      used to initialize this channel
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @is_p2a: A flag to identify a channel as P2A (RX)
 * @rx_timeout_ms: The configured RX timeout in milliseconds.
 * @handle: Pointer to SCMI entity handle
 * @no_completion_irq: Flag to indicate that this channel has no completion
 *		       interrupt mechanism for synchronous commands.
 *		       This can be dynamically set by transports at run-time
 *		       inside their provided .chan_setup().
 * @transport_info: Transport layer related information
 */
struct scmi_chan_info {
	int id;
	struct device *dev;
	bool is_p2a;
	unsigned int rx_timeout_ms;
	struct scmi_handle *handle;
	bool no_completion_irq;
	void *transport_info;
};

/**
 * struct scmi_transport_ops - Structure representing a SCMI transport ops
 *
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @get_max_msg: Optional callback to provide max_msg dynamically
 *		 Returns the maximum number of messages for the channel type
 *		 (tx or rx) that can be pending simultaneously in the system
 * @send_message: Callback to send a message
 * @mark_txdone: Callback to mark tx as done
 * @fetch_response: Callback to fetch response
 * @fetch_notification: Callback to fetch notification
 * @clear_channel: Callback to clear a channel
 * @poll_done: Callback to poll transfer status
 */
struct scmi_transport_ops {
	bool (*chan_available)(struct device_node *of_node, int idx);
	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx);
	int (*chan_free)(int id, void *p, void *data);
	unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
	int (*send_message)(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer);
	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *xfer);
	void (*fetch_response)(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_chan_info *cinfo,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_chan_info *cinfo);
	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in milliseconds)
 * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
 *	be pending simultaneously in the system. May be overridden by the
 *	get_max_msg op.
 * @max_msg_size: Maximum size of data per message that can be handled.
 * @force_polling: Flag to force this whole transport to use the SCMI core
 *		   polling mechanism instead of completion interrupts, even if
 *		   those are available.
 * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
 *				synchronous-command messages are atomically
 *				completed on .send_message: no need to poll
 *				actively waiting for a response.
 *				Used internally by the core only when polling
 *				was selected as the wait-for-reply method:
 *				i.e. if a completion irq was found, that is
 *				used anyway.
 * @atomic_enabled: Flag to indicate that this transport, which is assured not
 *		    to sleep anywhere on the TX path, can be used in atomic mode
 *		    when requested.
 */
struct scmi_desc {
	const struct scmi_transport_ops *ops;
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
	const bool force_polling;
	const bool sync_cmds_completed_on_ret;
	const bool atomic_enabled;
};

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
				       const struct scmi_desc *desc)
{
	return cinfo->no_completion_irq || desc->force_polling;
}

static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
{
	return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
				      const struct scmi_desc *desc)
{
	return is_polling_required(cinfo, desc) &&
		is_transport_polling_capable(desc);
}
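
/*
 * To summarize the three predicates above: polling is used on a channel
 * only when it is both needed and possible for the transport, i.e.
 *
 *	required (no irq / forced)	capable (.poll_done / sync)	enabled
 *	--------------------------	---------------------------	-------
 *	false				don't care			false
 *	true				false				false
 *	true				true				true
 */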

void scmi_xfer_raw_put(const struct scmi_handle *handle,
		       struct scmi_xfer *xfer);
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);

int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer);

int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms);

enum debug_counters {
	SENT_OK,
	SENT_FAIL,
	SENT_FAIL_POLLING_UNSUPPORTED,
	SENT_FAIL_CHANNEL_NOT_FOUND,
	RESPONSE_OK,
	NOTIFICATION_OK,
	DELAYED_RESPONSE_OK,
	XFERS_RESPONSE_TIMEOUT,
	XFERS_RESPONSE_POLLED_TIMEOUT,
	RESPONSE_POLLED_OK,
	ERR_MSG_UNEXPECTED,
	ERR_MSG_INVALID,
	ERR_MSG_NOMEM,
	ERR_PROTOCOL,
	SCMI_DEBUG_COUNTERS_LAST
};

static inline void scmi_inc_count(atomic_t *arr, int stat)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
		atomic_inc(&arr[stat]);
}
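
/*
 * Usage sketch (array name hypothetical): callers keep an atomic_t array
 * indexed by enum debug_counters and bump the relevant entry as events
 * occur; with CONFIG_ARM_SCMI_DEBUG_COUNTERS unset this compiles to nothing.
 *
 *	static atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
 *
 *	scmi_inc_count(counters, SENT_OK);
 */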

enum scmi_bad_msg {
	MSG_UNEXPECTED = -1,	/* Message was not expected */
	MSG_INVALID = -2,	/* Message is malformed/invalid */
	MSG_UNKNOWN = -3,	/* Message is of unknown type */
	MSG_NOMEM = -4,		/* Failed to allocate memory for the message */
	MSG_MBOX_SPURIOUS = -5,	/* Spurious mailbox message received */
};

/* shmem related declarations */
struct scmi_shared_mem;

/**
 * struct scmi_shared_mem_operations - Transport core operations for
 * Shared Memory
 *
 * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem
 * @read_header: Read header of the message currently held in @shmem
 * @fetch_response: Copy the message response from @shmem into @xfer
 * @fetch_notification: Copy the message notification from @shmem into @xfer
 * @clear_channel: Clear the @shmem channel busy flag
 * @poll_done: Check if poll has completed for @xfer on @shmem
 * @channel_free: Check if @shmem channel is marked as free
 * @channel_intr_enabled: Check if @shmem channel has requested a completion irq
 * @setup_iomap: Setup IO shared memory for channel @cinfo
 */
struct scmi_shared_mem_operations {
	void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
			   struct scmi_xfer *xfer,
			   struct scmi_chan_info *cinfo);
	u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);

	void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
	bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer);
	bool (*channel_free)(struct scmi_shared_mem __iomem *shmem);
	bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
	void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
				     struct device *dev,
				     bool tx, struct resource *res);
};

const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);

/* declarations for message passing transports */
struct scmi_msg_payld;

/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD	(2 * sizeof(__le32))
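
/*
 * As a sanity check on the constant above: the two 32-bit words of overhead
 * plausibly account for the packed message header plus the status word
 * carried by responses (an assumption here, not spelled out in this header),
 * i.e. 2 * sizeof(__le32) = 8 bytes on top of the protocol payload.
 */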

/**
 * struct scmi_message_operations - Transport core operations for Message
 *
 * @response_size: Get calculated response size for @xfer
 * @command_size: Get calculated command size for @xfer
 * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg
 * @read_header: Read header of the message currently held in @msg
 * @fetch_response: Copy the message response from @msg into @xfer
 * @fetch_notification: Copy the message notification from @msg into @xfer
 */
struct scmi_message_operations {
	size_t (*response_size)(struct scmi_xfer *xfer);
	size_t (*command_size)(struct scmi_xfer *xfer);
	void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
	u32 (*read_header)(struct scmi_msg_payld *msg);
	void (*fetch_response)(struct scmi_msg_payld *msg, size_t len,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len,
				   size_t max_len, struct scmi_xfer *xfer);
};

const struct scmi_message_operations *scmi_message_operations_get(void);

/**
 * struct scmi_transport_core_operations - Transport core operations
 *
 * @bad_message_trace: A helper to report a malformed/unexpected message
 * @rx_callback: Callback to report received messages
 * @shmem: Datagram operations for shared memory based transports
 * @msg: Datagram operations for message based transports
 */
struct scmi_transport_core_operations {
	void (*bad_message_trace)(struct scmi_chan_info *cinfo,
				  u32 msg_hdr, enum scmi_bad_msg err);
	void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr,
			    void *priv);
	const struct scmi_shared_mem_operations *shmem;
	const struct scmi_message_operations *msg;
};

/**
 * struct scmi_transport - A structure representing a configured transport
 *
 * @supplier: Device representing the transport and acting as a supplier for
 *	      the core SCMI stack
 * @desc: Transport descriptor
 * @core_ops: A pointer to a pointer used by the core SCMI stack to make the
 *	      core transport operations accessible to the transports.
 */
struct scmi_transport {
	struct device *supplier;
	struct scmi_desc *desc;
	struct scmi_transport_core_operations **core_ops;
};

#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\
static void __tag##_dev_free(void *data)				\
{									\
	struct platform_device *spdev = data;				\
									\
	platform_device_unregister(spdev);				\
}									\
									\
static int __tag##_probe(struct platform_device *pdev)			\
{									\
	struct device *dev = &pdev->dev;				\
	struct platform_device *spdev;					\
	struct scmi_transport strans;					\
	int ret;							\
									\
	spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO);	\
	if (!spdev)							\
		return -ENOMEM;						\
									\
	device_set_of_node_from_dev(&spdev->dev, dev);			\
									\
	strans.supplier = dev;						\
	strans.desc = &(__desc);					\
	strans.core_ops = &(__core_ops);				\
									\
	ret = platform_device_add_data(spdev, &strans, sizeof(strans));	\
	if (ret)							\
		goto err;						\
									\
	ret = platform_device_add(spdev);				\
	if (ret)							\
		goto err;						\
									\
	return devm_add_action_or_reset(dev, __tag##_dev_free, spdev);	\
									\
err:									\
	platform_device_put(spdev);					\
	return ret;							\
}									\
									\
static struct platform_driver __drv = {					\
	.driver = {							\
		.name = #__tag "_transport",				\
		.of_match_table = __match,				\
	},								\
	.probe = __tag##_probe,						\
}
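
/*
 * Instantiation sketch (all names hypothetical) for a transport built on
 * this macro: declare the core-ops pointer the macro wires up, a matching
 * scmi_desc and an OF match table, then register the platform driver.
 *
 *	static struct scmi_transport_core_operations *core;
 *	static struct scmi_desc my_desc = { ... };
 *	static const struct of_device_id my_of_match[] = { ... };
 *
 *	DEFINE_SCMI_TRANSPORT_DRIVER(scmi_my, scmi_my_driver, my_desc,
 *				     my_of_match, core);
 *	module_platform_driver(scmi_my_driver);
 */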

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
#endif /* _SCMI_COMMON_H */