1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * System Control and Management Interface (SCMI) Message Protocol
4 * driver common header file containing some definitions, structures
5 * and function prototypes used in all the different SCMI protocols.
6 *
7 * Copyright (C) 2018-2024 ARM Ltd.
8 */
9 #ifndef _SCMI_COMMON_H
10 #define _SCMI_COMMON_H
11
12 #include <linux/bitfield.h>
13 #include <linux/completion.h>
14 #include <linux/device.h>
15 #include <linux/errno.h>
16 #include <linux/kernel.h>
17 #include <linux/hashtable.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/refcount.h>
21 #include <linux/scmi_protocol.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24
25 #include <linux/unaligned.h>
26
27 #include "protocols.h"
28 #include "notify.h"
29
30 #define SCMI_MAX_CHANNELS 256
31
32 #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
33
34 #define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104
35
/**
 * enum scmi_error_codes - Status codes carried in SCMI message responses
 *
 * Zero means success; negative values are the protocol-level error
 * encodings, translated to Linux error codes by scmi_to_linux_errno().
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};
49
/*
 * Map of SCMI status codes (negated and used as array index) to Linux
 * error codes; better than a switch case as long as the SCMI error
 * values remain contiguous.
 */
static const int scmi_linux_errmap[] = {
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
64
scmi_to_linux_errno(int errno)65 static inline int scmi_to_linux_errno(int errno)
66 {
67 int err_idx = -errno;
68
69 if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
70 return scmi_linux_errmap[err_idx];
71 return -EIO;
72 }
73
/*
 * SCMI 32-bit message header layout, as encoded by the masks below:
 * bits [7:0] message id, [9:8] message type, [17:10] protocol id,
 * bits [27:18] sequence number token.
 */
#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
/* Number of distinct sequence tokens: full token-field range plus one */
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
86
87 /*
88 * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
89 * order to minimize space and collisions, this should equal max_msg, i.e. the
90 * maximum number of in-flight messages on a specific platform, but such value
91 * is only available at runtime while kernel hashtables are statically sized:
92 * pick instead as a fixed static size the maximum number of entries that can
93 * fit the whole table into one 4k page.
94 */
95 #define SCMI_PENDING_XFERS_HT_ORDER_SZ 9
96
97 /**
98 * pack_scmi_header() - packs and returns 32-bit header
99 *
100 * @hdr: pointer to header containing all the information on message id,
101 * protocol id, sequence id and type.
102 *
103 * Return: 32-bit packed message header to be sent to the platform.
104 */
pack_scmi_header(struct scmi_msg_hdr * hdr)105 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
106 {
107 return FIELD_PREP(MSG_ID_MASK, hdr->id) |
108 FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
109 FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
110 FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
111 }
112
113 /**
114 * unpack_scmi_header() - unpacks and records message and protocol id
115 *
116 * @msg_hdr: 32-bit packed message header sent from the platform
117 * @hdr: pointer to header to fetch message and protocol id.
118 */
unpack_scmi_header(u32 msg_hdr,struct scmi_msg_hdr * hdr)119 static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
120 {
121 hdr->id = MSG_XTRACT_ID(msg_hdr);
122 hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
123 hdr->type = MSG_XTRACT_TYPE(msg_hdr);
124 }
125
/*
 * A helper macro to lookup an xfer from the @pending_xfers hashtable
 * using the message sequence number token as a key: evaluates to the
 * matching xfer pointer, or to NULL when no entry carries that token
 * (xfer_ is left NULL if the bucket walk finds no match).
 */
#define XFER_FIND(__ht, __k)					\
({								\
	typeof(__k) k_ = __k;					\
	struct scmi_xfer *xfer_ = NULL;				\
								\
	hash_for_each_possible((__ht), xfer_, node, k_)		\
		if (xfer_->hdr.seq == k_)			\
			break;					\
	xfer_;							\
})
140
/* Revision area and implemented-protocols bitmap accessors */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp);

extern const struct bus_type scmi_bus_type;

/* Actions reported through the scmi_requested_devices_nh notifier chain */
#define SCMI_BUS_NOTIFY_DEVICE_REQUEST		0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST	1
extern struct blocking_notifier_head scmi_requested_devices_nh;

/* Creation/destruction of SCMI devices on the SCMI bus */
struct scmi_device *scmi_device_create(struct device_node *np,
				       struct device *parent, int protocol,
				       const char *name);
void scmi_device_destroy(struct device *parent, int protocol, const char *name);

/* Acquire/release a protocol instance on @handle by protocol id */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
159
/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @id: An identifier for this channel: this matches the protocol number
 *      used to initialize this channel
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @is_p2a: A flag to identify a channel as P2A (RX)
 * @rx_timeout_ms: The configured RX timeout in milliseconds.
 * @max_msg_size: Maximum size of message payload.
 * @handle: Pointer to SCMI entity handle
 * @no_completion_irq: Flag to indicate that this channel has no completion
 *		       interrupt mechanism for synchronous commands.
 *		       This can be dynamically set by transports at run-time
 *		       inside their provided .chan_setup().
 * @transport_info: Transport layer related information, opaque to the SCMI
 *		    core and owned by the specific transport
 */
struct scmi_chan_info {
	int id;
	struct device *dev;
	bool is_p2a;
	unsigned int rx_timeout_ms;
	unsigned int max_msg_size;
	struct scmi_handle *handle;
	bool no_completion_irq;
	void *transport_info;
};
188
/**
 * struct scmi_transport_ops - Structure representing a SCMI transport ops
 *
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @get_max_msg: Optional callback to provide max_msg dynamically
 *		 Returns the maximum number of messages for the channel type
 *		 (tx or rx) that can be pending simultaneously in the system
 * @send_message: Callback to send a message on the channel
 * @mark_txdone: Callback to mark tx as done, carrying the outcome in @ret
 * @fetch_response: Callback to fetch response into @xfer
 * @fetch_notification: Callback to fetch notification into @xfer
 * @clear_channel: Callback to clear a channel
 * @poll_done: Callback to poll transfer status of @xfer
 */
struct scmi_transport_ops {
	bool (*chan_available)(struct device_node *of_node, int idx);
	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx);
	int (*chan_free)(int id, void *p, void *data);
	unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
	int (*send_message)(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer);
	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *xfer);
	void (*fetch_response)(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_chan_info *cinfo,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_chan_info *cinfo);
	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
222
/**
 * struct scmi_desc - Description of SoC integration
 *
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
 *	     be pending simultaneously in the system. May be overridden by the
 *	     get_max_msg op.
 * @max_msg_size: Maximum size of data payload per message that can be handled.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations.
 *		      Only SCMI synchronous commands reported by the platform
 *		      to have an execution latency lesser-equal to the threshold
 *		      should be considered for atomic mode operation: such
 *		      decision is finally left up to the SCMI drivers.
 * @force_polling: Flag to force this whole transport to use SCMI core polling
 *		   mechanism instead of completion interrupts even if available.
 * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
 *				synchronous-command messages are atomically
 *				completed on .send_message: no need to poll
 *				actively waiting for a response.
 *				Used by core internally only when polling is
 *				selected as a waiting for reply method: i.e.
 *				if a completion irq was found use that anyway.
 * @atomic_enabled: Flag to indicate that this transport, which is assured not
 *		    to sleep anywhere on the TX path, can be used in atomic mode
 *		    when requested.
 *
 * The three const bool flags are fixed properties of the transport, set
 * once at descriptor definition time.
 */
struct scmi_desc {
	const struct scmi_transport_ops *ops;
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
	unsigned int atomic_threshold;
	const bool force_polling;
	const bool sync_cmds_completed_on_ret;
	const bool atomic_enabled;
};
261
is_polling_required(struct scmi_chan_info * cinfo,const struct scmi_desc * desc)262 static inline bool is_polling_required(struct scmi_chan_info *cinfo,
263 const struct scmi_desc *desc)
264 {
265 return cinfo->no_completion_irq || desc->force_polling;
266 }
267
is_transport_polling_capable(const struct scmi_desc * desc)268 static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
269 {
270 return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
271 }
272
is_polling_enabled(struct scmi_chan_info * cinfo,const struct scmi_desc * desc)273 static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
274 const struct scmi_desc *desc)
275 {
276 return is_polling_required(cinfo, desc) &&
277 is_transport_polling_capable(desc);
278 }
279
/* Raw xfer get/put/lookup helpers (scmi_xfer_raw_* access paths) */
void scmi_xfer_raw_put(const struct scmi_handle *handle,
		       struct scmi_xfer *xfer);
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);

int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer);

int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms);
292
/*
 * Indices into the array of atomic debug counters bumped by
 * scmi_inc_count(); effective only when CONFIG_ARM_SCMI_DEBUG_COUNTERS
 * is enabled. SCMI_DEBUG_COUNTERS_LAST must remain last since it is
 * used as the number of counters.
 */
enum debug_counters {
	SENT_OK,
	SENT_FAIL,
	SENT_FAIL_POLLING_UNSUPPORTED,
	SENT_FAIL_CHANNEL_NOT_FOUND,
	RESPONSE_OK,
	NOTIFICATION_OK,
	DELAYED_RESPONSE_OK,
	XFERS_RESPONSE_TIMEOUT,
	XFERS_RESPONSE_POLLED_TIMEOUT,
	RESPONSE_POLLED_OK,
	ERR_MSG_UNEXPECTED,
	ERR_MSG_INVALID,
	ERR_MSG_NOMEM,
	ERR_PROTOCOL,
	SCMI_DEBUG_COUNTERS_LAST
};
310
/* Bump debug counter @stat in @arr; compiles away entirely when
 * CONFIG_ARM_SCMI_DEBUG_COUNTERS is not enabled.
 */
static inline void scmi_inc_count(atomic_t *arr, int stat)
{
	if (!IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
		return;

	atomic_inc(&arr[stat]);
}
316
/*
 * Classification tags for malformed/unexpected messages, reported by
 * transports through the core ->bad_message_trace() operation.
 */
enum scmi_bad_msg {
	MSG_UNEXPECTED = -1,
	MSG_INVALID = -2,
	MSG_UNKNOWN = -3,
	MSG_NOMEM = -4,
	MSG_MBOX_SPURIOUS = -5,
};
324
/*
 * Function pointer types used for compactness and signature validation
 * of the shared-memory copy routines being passed around.
 */
typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from,
				  size_t count);
typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from,
				    size_t count);

/**
 * struct scmi_shmem_io_ops - I/O operations to read from/write to
 * Shared Memory
 *
 * @toio: Copy data to the shared memory area
 * @fromio: Copy data from the shared memory area
 */
struct scmi_shmem_io_ops {
	shmem_copy_fromio_t fromio;
	shmem_copy_toio_t toio;
};
344
/* shmem related declarations */
struct scmi_shared_mem;

/**
 * struct scmi_shared_mem_operations - Transport core operations for
 * Shared Memory
 *
 * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem
 * @read_header: Read header of the message currently held in @shmem
 * @fetch_response: Copy the message response from @shmem into @xfer
 * @fetch_notification: Copy the message notification from @shmem into @xfer
 * @clear_channel: Clear the @shmem channel busy flag
 * @poll_done: Check if poll has completed for @xfer on @shmem
 * @channel_free: Check if @shmem channel is marked as free
 * @channel_intr_enabled: Check if @shmem channel has requested a completion irq
 * @setup_iomap: Setup IO shared memory for channel @cinfo
 */
struct scmi_shared_mem_operations {
	void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
			   struct scmi_xfer *xfer,
			   struct scmi_chan_info *cinfo,
			   shmem_copy_toio_t toio);
	u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);

	void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
			       struct scmi_xfer *xfer,
			       shmem_copy_fromio_t fromio);
	void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
				   size_t max_len, struct scmi_xfer *xfer,
				   shmem_copy_fromio_t fromio);
	void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
	bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer);
	bool (*channel_free)(struct scmi_shared_mem __iomem *shmem);
	bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
	void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
				     struct device *dev,
				     bool tx, struct resource *res,
				     struct scmi_shmem_io_ops **ops);
};
387
/* declarations for message passing transports */
struct scmi_msg_payld;

/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))

/**
 * struct scmi_message_operations - Transport core operations for Message
 *
 * @response_size: Get calculated response size for @xfer
 * @command_size: Get calculated command size for @xfer
 * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg
 * @read_header: Read header of the message currently held in @msg
 * @fetch_response: Copy the message response from @msg into @xfer
 * @fetch_notification: Copy the message notification from @msg into @xfer
 */
struct scmi_message_operations {
	size_t (*response_size)(struct scmi_xfer *xfer);
	size_t (*command_size)(struct scmi_xfer *xfer);
	void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
	u32 (*read_header)(struct scmi_msg_payld *msg);
	void (*fetch_response)(struct scmi_msg_payld *msg, size_t len,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len,
				   size_t max_len, struct scmi_xfer *xfer);
};

const struct scmi_message_operations *scmi_message_operations_get(void);
416
/**
 * struct scmi_transport_core_operations - Transport core operations
 *
 * @bad_message_trace: A helper to report a malformed/unexpected message
 * @rx_callback: Callback to report received messages
 * @shmem: Datagram operations for shared memory based transports
 * @msg: Datagram operations for message based transports
 */
struct scmi_transport_core_operations {
	void (*bad_message_trace)(struct scmi_chan_info *cinfo,
				  u32 msg_hdr, enum scmi_bad_msg err);
	void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr,
			    void *priv);
	const struct scmi_shared_mem_operations *shmem;
	const struct scmi_message_operations *msg;
};
433
/**
 * struct scmi_transport - A structure representing a configured transport
 *
 * @supplier: Device representing the transport and acting as a supplier for
 *	      the core SCMI stack
 * @desc: Transport descriptor
 * @core_ops: A pointer to a pointer used by the core SCMI stack to make the
 *	      core transport operations accessible to the transports.
 *
 * Passed as platform data to the core "arm-scmi" device by
 * DEFINE_SCMI_TRANSPORT_DRIVER() generated probe functions.
 */
struct scmi_transport {
	struct device *supplier;
	struct scmi_desc *desc;
	struct scmi_transport_core_operations **core_ops;
};
448
/**
 * DEFINE_SCMI_TRANSPORT_DRIVER - Define a platform driver for an SCMI
 *				  transport
 *
 * @__tag: Unique token used to derive the generated function/driver names
 * @__drv: Name of the generated struct platform_driver
 * @__desc: The struct scmi_desc instance describing this transport
 * @__match: The of_device_id match table for this transport
 * @__core_ops: Per-transport core-operations pointer to be shared with the
 *		SCMI core
 *
 * The generated probe allocates and registers an "arm-scmi" core platform
 * device, handing it a struct scmi_transport as platform data; the core
 * device is unregistered through a devm action when the transport device
 * goes away.
 */
#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\
static void __tag##_dev_free(void *data)				\
{									\
	struct platform_device *spdev = data;				\
									\
	platform_device_unregister(spdev);				\
}									\
									\
static int __tag##_probe(struct platform_device *pdev)			\
{									\
	struct device *dev = &pdev->dev;				\
	struct platform_device *spdev;					\
	struct scmi_transport strans;					\
	int ret;							\
									\
	spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO);	\
	if (!spdev)							\
		return -ENOMEM;						\
									\
	device_set_of_node_from_dev(&spdev->dev, dev);			\
									\
	strans.supplier = dev;						\
	strans.desc = &(__desc);					\
	strans.core_ops = &(__core_ops);				\
									\
	ret = platform_device_add_data(spdev, &strans, sizeof(strans));	\
	if (ret)							\
		goto err;						\
									\
	ret = platform_device_add(spdev);				\
	if (ret)							\
		goto err;						\
									\
	return devm_add_action_or_reset(dev, __tag##_dev_free, spdev);	\
									\
err:									\
	platform_device_put(spdev);					\
	return ret;							\
}									\
									\
static struct platform_driver __drv = {					\
	.driver = {							\
		   .name = #__tag "_transport",				\
		   .of_match_table = __match,				\
		   },							\
	.probe = __tag##_probe,						\
}
496
/* Set/get notification-related private data stashed on @handle */
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
500 #endif /* _SCMI_COMMON_H */
501