xref: /linux/drivers/firmware/arm_scmi/common.h (revision d8283ac2c8fbf2b459672064ebff718fbefae226)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * System Control and Management Interface (SCMI) Message Protocol
4  * driver common header file containing some definitions, structures
5  * and function prototypes used in all the different SCMI protocols.
6  *
7  * Copyright (C) 2018-2024 ARM Ltd.
8  */
9 #ifndef _SCMI_COMMON_H
10 #define _SCMI_COMMON_H
11 
12 #include <linux/bitfield.h>
13 #include <linux/completion.h>
14 #include <linux/device.h>
15 #include <linux/errno.h>
16 #include <linux/kernel.h>
17 #include <linux/hashtable.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/refcount.h>
21 #include <linux/scmi_protocol.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 
25 #include <linux/unaligned.h>
26 
27 #include "protocols.h"
28 #include "notify.h"
29 
/* Upper bound on the number of SCMI channels */
#define SCMI_MAX_CHANNELS		256

/* Maximum response timeout: 2 seconds, expressed in milliseconds */
#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/* Maximum payload size handled by shared memory based transports */
#define SCMI_SHMEM_MAX_PAYLOAD_SIZE	104
35 
/*
 * Status codes as carried on the wire by the SCMI protocol; these are
 * protocol-level errors, translated to Linux errnos by
 * scmi_to_linux_errno() below.
 */
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};
49 
/* Maps each scmi_error_codes value, negated and used as an index, to an errno */
static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
64 
65 static inline int scmi_to_linux_errno(int errno)
66 {
67 	int err_idx = -errno;
68 
69 	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
70 		return scmi_linux_errmap[err_idx];
71 	return -EIO;
72 }
73 
/*
 * SCMI message header layout (32 bits):
 *  - bits [7:0]:   message id
 *  - bits [9:8]:   message type
 *  - bits [17:10]: protocol id
 *  - bits [27:18]: sequence number (token)
 */
#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND	0
#define MSG_TYPE_DELAYED_RESP	2
#define MSG_TYPE_NOTIFICATION	3
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
/* Number of distinct sequence tokens: one past the maximum token value */
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

/*
 * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
 * order to minimize space and collisions, this should equal max_msg, i.e. the
 * maximum number of in-flight messages on a specific platform, but such value
 * is only available at runtime while kernel hashtables are statically sized:
 * pick instead as a fixed static size the maximum number of entries that can
 * fit the whole table into one 4k page.
 */
#define SCMI_PENDING_XFERS_HT_ORDER_SZ		9
96 
97 /**
98  * pack_scmi_header() - packs and returns 32-bit header
99  *
100  * @hdr: pointer to header containing all the information on message id,
101  *	protocol id, sequence id and type.
102  *
103  * Return: 32-bit packed message header to be sent to the platform.
104  */
105 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
106 {
107 	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
108 		FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
109 		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
110 		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
111 }
112 
113 /**
114  * unpack_scmi_header() - unpacks and records message and protocol id
115  *
116  * @msg_hdr: 32-bit packed message header sent from the platform
117  * @hdr: pointer to header to fetch message and protocol id.
118  */
119 static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
120 {
121 	hdr->id = MSG_XTRACT_ID(msg_hdr);
122 	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
123 	hdr->type = MSG_XTRACT_TYPE(msg_hdr);
124 }
125 
/*
 * A helper macro to lookup an xfer from the @pending_xfers hashtable
 * using the message sequence number token as a key: evaluates to the
 * matching xfer pointer, or NULL if no entry with that token exists.
 */
#define XFER_FIND(__ht, __k)					\
({								\
	typeof(__k) k_ = __k;					\
	struct scmi_xfer *xfer_ = NULL;				\
								\
	hash_for_each_possible((__ht), xfer_, node, k_)		\
		if (xfer_->hdr.seq == k_)			\
			break;					\
	xfer_;							\
})
140 
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp);

/* The bus on which SCMI protocol devices are created and matched */
extern const struct bus_type scmi_bus_type;

/* Actions reported through the scmi_requested_devices_nh notifier chain */
#define SCMI_BUS_NOTIFY_DEVICE_REQUEST		0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST	1
extern struct blocking_notifier_head scmi_requested_devices_nh;

struct scmi_device *scmi_device_create(struct device_node *np,
				       struct device *parent, int protocol,
				       const char *name);
void scmi_device_destroy(struct device *parent, int protocol, const char *name);

/* Acquire/release usage of protocol @protocol_id on the given @handle */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
159 
/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @id: An identifier for this channel: this matches the protocol number
 *      used to initialize this channel
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @is_p2a: A flag to identify a channel as P2A (RX)
 * @rx_timeout_ms: The configured RX timeout in milliseconds.
 * @max_msg_size: Maximum size of the message payload that can be carried on
 *		  this channel.
 * @handle: Pointer to SCMI entity handle
 * @no_completion_irq: Flag to indicate that this channel has no completion
 *		       interrupt mechanism for synchronous commands.
 *		       This can be dynamically set by transports at run-time
 *		       inside their provided .chan_setup().
 * @transport_info: Transport layer related information
 */
struct scmi_chan_info {
	int id;
	struct device *dev;
	bool is_p2a;
	unsigned int rx_timeout_ms;
	unsigned int max_msg_size;
	struct scmi_handle *handle;
	bool no_completion_irq;
	void *transport_info;
};
188 
/**
 * struct scmi_transport_ops - Structure representing a SCMI transport ops
 *
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @get_max_msg: Optional callback to provide max_msg dynamically
 *		 Returns the maximum number of messages for the channel type
 *		 (tx or rx) that can be pending simultaneously in the system
 * @send_message: Callback to send a message
 * @mark_txdone: Callback to mark tx as done
 * @fetch_response: Callback to fetch response
 * @fetch_notification: Callback to fetch notification
 * @clear_channel: Callback to clear a channel
 * @poll_done: Callback to poll the completion status of an in-flight transfer
 */
struct scmi_transport_ops {
	bool (*chan_available)(struct device_node *of_node, int idx);
	int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx);
	int (*chan_free)(int id, void *p, void *data);
	unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
	int (*send_message)(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer);
	void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *xfer);
	void (*fetch_response)(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_chan_info *cinfo,
				   size_t max_len, struct scmi_xfer *xfer);
	void (*clear_channel)(struct scmi_chan_info *cinfo);
	bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
222 
/**
 * struct scmi_desc - Description of SoC integration
 *
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
 *	be pending simultaneously in the system. May be overridden by the
 *	get_max_msg op.
 * @max_msg_size: Maximum size of data payload per message that can be handled.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations.
 *		      Only SCMI synchronous commands reported by the platform
 *		      to have an execution latency less than or equal to the
 *		      threshold should be considered for atomic mode operation:
 *		      such decision is finally left up to the SCMI drivers.
 * @no_completion_irq: Flag to indicate that this transport has no completion
 *		       interrupt and has to be polled. This is similar to the
 *		       force_polling below, except this is set via DT property.
 * @force_polling: Flag to force this whole transport to use SCMI core polling
 *		   mechanism instead of completion interrupts even if available.
 * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
 *				synchronous-command messages are atomically
 *				completed on .send_message: no need to poll
 *				actively waiting for a response.
 *				Used by core internally only when polling is
 *				selected as a waiting for reply method: i.e.
 *				if a completion irq was found use that anyway.
 * @atomic_enabled: Flag to indicate that this transport, which is assured not
 *		    to sleep anywhere on the TX path, can be used in atomic mode
 *		    when requested.
 */
struct scmi_desc {
	const struct scmi_transport_ops *ops;
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
	unsigned int atomic_threshold;
	bool no_completion_irq;
	const bool force_polling;
	const bool sync_cmds_completed_on_ret;
	const bool atomic_enabled;
};
265 
266 static inline bool is_polling_required(struct scmi_chan_info *cinfo,
267 				       const struct scmi_desc *desc)
268 {
269 	return cinfo->no_completion_irq || desc->force_polling;
270 }
271 
272 static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
273 {
274 	return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
275 }
276 
277 static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
278 				      const struct scmi_desc *desc)
279 {
280 	return is_polling_required(cinfo, desc) &&
281 		is_transport_polling_capable(desc);
282 }
283 
/* Helpers for SCMI Raw mode xfer handling */
void scmi_xfer_raw_put(const struct scmi_handle *handle,
		       struct scmi_xfer *xfer);
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);

int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer);

int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms);
296 
/* Indices into the struct scmi_debug_info @counters statistics array */
enum debug_counters {
	SENT_OK,
	SENT_FAIL,
	SENT_FAIL_POLLING_UNSUPPORTED,
	SENT_FAIL_CHANNEL_NOT_FOUND,
	RESPONSE_OK,
	NOTIFICATION_OK,
	DELAYED_RESPONSE_OK,
	XFERS_RESPONSE_TIMEOUT,
	XFERS_RESPONSE_POLLED_TIMEOUT,
	RESPONSE_POLLED_OK,
	ERR_MSG_UNEXPECTED,
	ERR_MSG_INVALID,
	ERR_MSG_NOMEM,
	ERR_PROTOCOL,
	XFERS_INFLIGHT,
	SCMI_DEBUG_COUNTERS_LAST	/* Array size; keep last */
};
315 
/**
 * struct scmi_debug_info  - Debug common info
 * @top_dentry: A reference to the top debugfs dentry
 * @name: Name of this SCMI instance
 * @type: Type of this SCMI instance
 * @is_atomic: Flag to state if the transport of this instance is atomic
 * @counters: An array of atomic_t's used for tracking statistics (if enabled)
 */
struct scmi_debug_info {
	struct dentry *top_dentry;
	const char *name;
	const char *type;
	bool is_atomic;
	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};
331 
332 static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
333 {
334 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
335 		if (dbg)
336 			atomic_inc(&dbg->counters[stat]);
337 	}
338 }
339 
340 static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
341 {
342 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
343 		if (dbg)
344 			atomic_dec(&dbg->counters[stat]);
345 	}
346 }
347 
/*
 * Classification of bad/unexpected received messages, as reported by
 * transports to the core via the .bad_message_trace() core operation.
 */
enum scmi_bad_msg {
	MSG_UNEXPECTED = -1,
	MSG_INVALID = -2,
	MSG_UNKNOWN = -3,
	MSG_NOMEM = -4,
	MSG_MBOX_SPURIOUS = -5,
};
355 
/*
 * Typedefs used for compactness and signature validation of the function
 * pointers being passed.
 */
typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from,
				  size_t count);
typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from,
				    size_t count);

/**
 * struct scmi_shmem_io_ops  - I/O operations to read from/write to
 * Shared Memory
 *
 * @toio: Copy data to the shared memory area
 * @fromio: Copy data from the shared memory area
 */
struct scmi_shmem_io_ops {
	shmem_copy_fromio_t fromio;
	shmem_copy_toio_t toio;
};
375 
/* shmem related declarations */
struct scmi_shared_mem;

/**
 * struct scmi_shared_mem_operations  - Transport core operations for
 * Shared Memory
 *
 * @tx_prepare: Prepare the @xfer message for transmission on the chosen @shmem
 * @read_header: Read header of the message currently held in @shmem
 * @fetch_response: Copy the message response from @shmem into @xfer
 * @fetch_notification: Copy the message notification from @shmem into @xfer
 * @clear_channel: Clear the @shmem channel busy flag
 * @poll_done: Check if poll has completed for @xfer on @shmem
 * @channel_free: Check if @shmem channel is marked as free
 * @channel_intr_enabled: Check if @shmem channel has requested a completion irq
 * @setup_iomap: Setup IO shared memory for channel @cinfo
 */
struct scmi_shared_mem_operations {
	void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
			   struct scmi_xfer *xfer,
			   struct scmi_chan_info *cinfo,
			   shmem_copy_toio_t toio);
	u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);

	void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
			       struct scmi_xfer *xfer,
			       shmem_copy_fromio_t fromio);
	void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
				   size_t max_len, struct scmi_xfer *xfer,
				   shmem_copy_fromio_t fromio);
	void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
	bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
			  struct scmi_xfer *xfer);
	bool (*channel_free)(struct scmi_shared_mem __iomem *shmem);
	bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
	void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
				     struct device *dev,
				     bool tx, struct resource *res,
				     struct scmi_shmem_io_ops **ops);
};
416 
/* Accessor for the core-provided shared-memory transport operations */
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);

/* declarations for message passing transports */
struct scmi_msg_payld;

/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))
424 
/**
 * struct scmi_message_operations  - Transport core operations for Message
 *
 * @response_size: Get calculated response size for @xfer
 * @command_size: Get calculated command size for @xfer
 * @tx_prepare: Prepare the @xfer message for transmission on the provided @msg
 * @read_header: Read header of the message currently held in @msg
 * @fetch_response: Copy the message response from @msg into @xfer
 * @fetch_notification: Copy the message notification from @msg into @xfer
 */
struct scmi_message_operations {
	size_t (*response_size)(struct scmi_xfer *xfer);
	size_t (*command_size)(struct scmi_xfer *xfer);
	void (*tx_prepare)(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
	u32 (*read_header)(struct scmi_msg_payld *msg);
	void (*fetch_response)(struct scmi_msg_payld *msg, size_t len,
			       struct scmi_xfer *xfer);
	void (*fetch_notification)(struct scmi_msg_payld *msg, size_t len,
				   size_t max_len, struct scmi_xfer *xfer);
};

/* Accessor for the core-provided message-passing transport operations */
const struct scmi_message_operations *scmi_message_operations_get(void);
447 
/**
 * struct scmi_transport_core_operations  - Transport core operations
 *
 * @bad_message_trace: A helper to report a malformed/unexpected message
 * @rx_callback: Callback to report received messages
 * @shmem: Datagram operations for shared memory based transports
 * @msg: Datagram operations for message based transports
 */
struct scmi_transport_core_operations {
	void (*bad_message_trace)(struct scmi_chan_info *cinfo,
				  u32 msg_hdr, enum scmi_bad_msg err);
	void (*rx_callback)(struct scmi_chan_info *cinfo, u32 msg_hdr,
			    void *priv);
	const struct scmi_shared_mem_operations *shmem;
	const struct scmi_message_operations *msg;
};
464 
/**
 * struct scmi_transport  - A structure representing a configured transport
 *
 * @supplier: Device representing the transport and acting as a supplier for
 *	      the core SCMI stack
 * @desc: Transport descriptor
 * @core_ops: A pointer to a pointer used by the core SCMI stack to make the
 *	      core transport operations accessible to the transports.
 *
 * An instance of this structure is passed as platform data to the core
 * "arm-scmi" device by DEFINE_SCMI_TRANSPORT_DRIVER().
 */
struct scmi_transport {
	struct device *supplier;
	struct scmi_desc desc;
	struct scmi_transport_core_operations **core_ops;
};
479 
/*
 * DEFINE_SCMI_TRANSPORT_DRIVER() - Emit the boilerplate platform driver for
 * an SCMI transport.
 *
 * Generates a probe routine that registers a core "arm-scmi" platform
 * device, handing it a struct scmi_transport (the supplier device, a copy
 * of @__desc and a pointer to @__core_ops) as platform data, plus a devm
 * action that unregisters that device when the transport driver unbinds.
 */
#define DEFINE_SCMI_TRANSPORT_DRIVER(__tag, __drv, __desc, __match, __core_ops)\
static void __tag##_dev_free(void *data)				       \
{									       \
	struct platform_device *spdev = data;				       \
									       \
	platform_device_unregister(spdev);				       \
}									       \
									       \
static int __tag##_probe(struct platform_device *pdev)			       \
{									       \
	struct device *dev = &pdev->dev;				       \
	struct platform_device *spdev;					       \
	struct scmi_transport strans;					       \
	int ret;							       \
									       \
	spdev = platform_device_alloc("arm-scmi", PLATFORM_DEVID_AUTO);	       \
	if (!spdev)							       \
		return -ENOMEM;						       \
									       \
	device_set_of_node_from_dev(&spdev->dev, dev);			       \
									       \
	strans.supplier = dev;						       \
	memcpy(&strans.desc, &(__desc), sizeof(strans.desc));		       \
	strans.core_ops = &(__core_ops);				       \
									       \
	ret = platform_device_add_data(spdev, &strans, sizeof(strans));	       \
	if (ret)							       \
		goto err;						       \
									       \
	spdev->dev.parent = dev;					       \
	ret = platform_device_add(spdev);				       \
	if (ret)							       \
		goto err;						       \
									       \
	return devm_add_action_or_reset(dev, __tag##_dev_free, spdev);	       \
									       \
err:									       \
	platform_device_put(spdev);					       \
	return ret;							       \
}									       \
									       \
static struct platform_driver __drv = {					       \
	.driver = {							       \
		   .name = #__tag "_transport",				       \
		   .of_match_table = __match,				       \
		   },							       \
	.probe = __tag##_probe,						       \
}
528 
/* Set/get notification-layer private data attached to an SCMI instance */
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
/* NOTE(review): presumably reports the current number of in-flight xfers */
int scmi_inflight_count(const struct scmi_handle *handle);
533 #endif /* _SCMI_COMMON_H */
534