xref: /linux/drivers/firmware/arm_scmi/driver.c (revision 0f46f50845ce75bfaba62df0421084d23bb6a72f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Message Protocol driver
4  *
5  * SCMI Message Protocol is used between the System Control Processor(SCP)
6  * and the Application Processors(AP). The Message Handling Unit(MHU)
7  * provides a mechanism for inter-processor communication between SCP's
8  * Cortex M3 and AP.
9  *
10  * SCP offers control and management of the core/cluster power states,
11  * various power domain DVFS including the core/cluster, certain system
12  * clocks configuration, thermal sensors and many others.
13  *
14  * Copyright (C) 2018-2025 ARM Ltd.
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/bitmap.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/idr.h>
24 #include <linux/io.h>
25 #include <linux/io-64-nonatomic-hi-lo.h>
26 #include <linux/kernel.h>
27 #include <linux/kmod.h>
28 #include <linux/ktime.h>
29 #include <linux/hashtable.h>
30 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/platform_device.h>
34 #include <linux/processor.h>
35 #include <linux/refcount.h>
36 #include <linux/slab.h>
37 #include <linux/xarray.h>
38 
39 #include "common.h"
40 #include "notify.h"
41 #include "quirks.h"
42 
43 #include "raw_mode.h"
44 
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/scmi.h>
47 
48 #define SCMI_VENDOR_MODULE_ALIAS_FMT	"scmi-protocol-0x%02x-%s"
49 
50 static DEFINE_IDA(scmi_id);
51 
52 static DEFINE_XARRAY(scmi_protocols);
53 
54 /* List of all SCMI devices active in system */
55 static LIST_HEAD(scmi_list);
56 /* Protection for the entire list */
57 static DEFINE_MUTEX(scmi_list_mutex);
58 /* Track the unique id for the transfers for debug & profiling purpose */
59 static atomic_t transfer_last_id;
60 
61 static struct dentry *scmi_top_dentry;
62 
63 /**
64  * struct scmi_xfers_info - Structure to manage transfer information
65  *
66  * @xfer_alloc_table: Bitmap table for allocated messages.
67  *	Index of this bitmap table is also used for message
68  *	sequence identifier.
69  * @xfer_lock: Protection for message allocation
70  * @max_msg: Maximum number of messages that can be pending
71  * @free_xfers: A free list for available to use xfers. It is initialized with
72  *		a number of xfers equal to the maximum allowed in-flight
73  *		messages.
74  * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
75  *		   currently in-flight messages.
76  */
77 struct scmi_xfers_info {
78 	unsigned long *xfer_alloc_table;
79 	spinlock_t xfer_lock;
80 	int max_msg;
81 	struct hlist_head free_xfers;
82 	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
83 };
84 
85 /**
86  * struct scmi_protocol_instance  - Describe an initialized protocol instance.
87  * @handle: Reference to the SCMI handle associated to this protocol instance.
88  * @proto: A reference to the protocol descriptor.
89  * @gid: A reference for per-protocol devres management.
90  * @users: A refcount to track effective users of this protocol.
91  * @priv: Reference for optional protocol private data.
92  * @version: Protocol version supported by the platform as detected at runtime.
93  * @negotiated_version: When the platform supports a newer protocol version,
94  *			the agent will try to negotiate with the platform the
95  *			usage of the newest version known to it, since
96  *			backward compatibility is NOT automatically assured.
97  *			This field is NON-zero when a successful negotiation
98  *			has completed.
99  * @ph: An embedded protocol handle that will be passed down to protocol
100  *	initialization code to identify this instance.
101  *
102  * Each protocol is initialized independently once for each SCMI platform in
103  * which is defined by DT and implemented by the SCMI server fw.
104  */
105 struct scmi_protocol_instance {
106 	const struct scmi_handle	*handle;
107 	const struct scmi_protocol	*proto;
108 	void				*gid;
109 	refcount_t			users;
110 	void				*priv;
111 	unsigned int			version;
112 	unsigned int			negotiated_version;
113 	struct scmi_protocol_handle	ph;
114 };
115 
116 #define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
117 
118 /**
119  * struct scmi_debug_info  - Debug common info
120  * @top_dentry: A reference to the top debugfs dentry
121  * @name: Name of this SCMI instance
122  * @type: Type of this SCMI instance
123  * @is_atomic: Flag to state if the transport of this instance is atomic
124  * @counters: An array of atomic_c's used for tracking statistics (if enabled)
125  */
126 struct scmi_debug_info {
127 	struct dentry *top_dentry;
128 	const char *name;
129 	const char *type;
130 	bool is_atomic;
131 	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
132 };
133 
134 /**
135  * struct scmi_info - Structure representing a SCMI instance
136  *
137  * @id: A sequence number starting from zero identifying this instance
138  * @dev: Device pointer
139  * @desc: SoC description for this instance
140  * @version: SCMI revision information containing protocol version,
141  *	implementation version and (sub-)vendor identification.
142  * @handle: Instance of SCMI handle to send to clients
143  * @tx_minfo: Universal Transmit Message management info
144  * @rx_minfo: Universal Receive Message management info
145  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
146  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
147  * @protocols: IDR for protocols' instance descriptors initialized for
148  *	       this SCMI instance: populated on protocol's first attempted
149  *	       usage.
150  * @protocols_mtx: A mutex to protect protocols instances initialization.
151  * @protocols_imp: List of protocols implemented, currently maximum of
152  *		   scmi_revision_info.num_protocols elements allocated by the
153  *		   base protocol
154  * @active_protocols: IDR storing device_nodes for protocols actually defined
155  *		      in the DT and confirmed as implemented by fw.
156  * @notify_priv: Pointer to private data structure specific to notifications.
157  * @node: List head
158  * @users: Number of users of this instance
159  * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
160  * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
161  *		bus
162  * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
163  * @dbg: A pointer to debugfs related data (if any)
164  * @raw: An opaque reference handle used by SCMI Raw mode.
165  */
166 struct scmi_info {
167 	int id;
168 	struct device *dev;
169 	const struct scmi_desc *desc;
170 	struct scmi_revision_info version;
171 	struct scmi_handle handle;
172 	struct scmi_xfers_info tx_minfo;
173 	struct scmi_xfers_info rx_minfo;
174 	struct idr tx_idr;
175 	struct idr rx_idr;
176 	struct idr protocols;
177 	/* Ensure mutual exclusive access to protocols instance array */
178 	struct mutex protocols_mtx;
179 	u8 *protocols_imp;
180 	struct idr active_protocols;
181 	void *notify_priv;
182 	struct list_head node;
183 	int users;
184 	struct notifier_block bus_nb;
185 	struct notifier_block dev_req_nb;
186 	/* Serialize device creation process for this instance */
187 	struct mutex devreq_mtx;
188 	struct scmi_debug_info *dbg;
189 	void *raw;
190 };
191 
192 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
193 #define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo)
194 #define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
195 #define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)
196 
197 static void scmi_rx_callback(struct scmi_chan_info *cinfo,
198 			     u32 msg_hdr, void *priv);
199 static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
200 				   u32 msg_hdr, enum scmi_bad_msg err);
201 
202 static struct scmi_transport_core_operations scmi_trans_core_ops = {
203 	.bad_message_trace = scmi_bad_message_trace,
204 	.rx_callback = scmi_rx_callback,
205 };
206 
207 static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id,char * vendor_id,char * sub_vendor_id,u32 impl_ver)208 scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
209 			       char *sub_vendor_id, u32 impl_ver)
210 {
211 	char *signature, *p;
212 	unsigned long hash = 0;
213 
214 	/* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
215 	signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
216 			      vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
217 	if (!signature)
218 		return 0;
219 
220 	p = signature;
221 	while (*p)
222 		hash = partial_name_hash(tolower(*p++), hash);
223 	hash = end_name_hash(hash);
224 
225 	kfree(signature);
226 
227 	return hash;
228 }
229 
230 static unsigned long
scmi_protocol_key_calculate(int protocol_id,char * vendor_id,char * sub_vendor_id,u32 impl_ver)231 scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
232 			    char *sub_vendor_id, u32 impl_ver)
233 {
234 	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
235 		return protocol_id;
236 	else
237 		return scmi_vendor_protocol_signature(protocol_id, vendor_id,
238 						      sub_vendor_id, impl_ver);
239 }
240 
241 static const struct scmi_protocol *
__scmi_vendor_protocol_lookup(int protocol_id,char * vendor_id,char * sub_vendor_id,u32 impl_ver)242 __scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
243 			      char *sub_vendor_id, u32 impl_ver)
244 {
245 	unsigned long key;
246 	struct scmi_protocol *proto = NULL;
247 
248 	key = scmi_protocol_key_calculate(protocol_id, vendor_id,
249 					  sub_vendor_id, impl_ver);
250 	if (key)
251 		proto = xa_load(&scmi_protocols, key);
252 
253 	return proto;
254 }
255 
256 static const struct scmi_protocol *
scmi_vendor_protocol_lookup(int protocol_id,char * vendor_id,char * sub_vendor_id,u32 impl_ver)257 scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
258 			    char *sub_vendor_id, u32 impl_ver)
259 {
260 	const struct scmi_protocol *proto = NULL;
261 
262 	/* Searching for closest match ...*/
263 	proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
264 					      sub_vendor_id, impl_ver);
265 	if (proto)
266 		return proto;
267 
268 	/* Any match just on vendor/sub_vendor ? */
269 	if (impl_ver) {
270 		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
271 						      sub_vendor_id, 0);
272 		if (proto)
273 			return proto;
274 	}
275 
276 	/* Any match just on the vendor ? */
277 	if (sub_vendor_id)
278 		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
279 						      NULL, 0);
280 	return proto;
281 }
282 
283 static const struct scmi_protocol *
scmi_vendor_protocol_get(int protocol_id,struct scmi_revision_info * version)284 scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
285 {
286 	const struct scmi_protocol *proto;
287 
288 	proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
289 					    version->sub_vendor_id,
290 					    version->impl_ver);
291 	if (!proto) {
292 		int ret;
293 
294 		pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
295 			 protocol_id, version->vendor_id);
296 
297 		/* Note that vendor_id is mandatory for vendor protocols */
298 		ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
299 				     protocol_id, version->vendor_id);
300 		if (ret) {
301 			pr_warn("Problem loading module for protocol 0x%x\n",
302 				protocol_id);
303 			return NULL;
304 		}
305 
306 		/* Lookup again, once modules loaded */
307 		proto = scmi_vendor_protocol_lookup(protocol_id,
308 						    version->vendor_id,
309 						    version->sub_vendor_id,
310 						    version->impl_ver);
311 	}
312 
313 	if (proto)
314 		pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
315 			protocol_id, proto->vendor_id ?: "",
316 			proto->sub_vendor_id ?: "", proto->impl_ver);
317 
318 	return proto;
319 }
320 
321 static const struct scmi_protocol *
scmi_protocol_get(int protocol_id,struct scmi_revision_info * version)322 scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
323 {
324 	const struct scmi_protocol *proto = NULL;
325 
326 	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
327 		proto = xa_load(&scmi_protocols, protocol_id);
328 	else
329 		proto = scmi_vendor_protocol_get(protocol_id, version);
330 
331 	if (!proto || !try_module_get(proto->owner)) {
332 		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
333 		return NULL;
334 	}
335 
336 	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
337 
338 	return proto;
339 }
340 
scmi_protocol_put(const struct scmi_protocol * proto)341 static void scmi_protocol_put(const struct scmi_protocol *proto)
342 {
343 	if (proto)
344 		module_put(proto->owner);
345 }
346 
scmi_vendor_protocol_check(const struct scmi_protocol * proto)347 static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
348 {
349 	if (!proto->vendor_id) {
350 		pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
351 		return -EINVAL;
352 	}
353 
354 	if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
355 		pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
356 		return -EINVAL;
357 	}
358 
359 	if (proto->sub_vendor_id &&
360 	    strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
361 		pr_err("malformed sub_vendor_id for protocol 0x%x\n",
362 		       proto->id);
363 		return -EINVAL;
364 	}
365 
366 	return 0;
367 }
368 
scmi_protocol_register(const struct scmi_protocol * proto)369 int scmi_protocol_register(const struct scmi_protocol *proto)
370 {
371 	int ret;
372 	unsigned long key;
373 
374 	if (!proto) {
375 		pr_err("invalid protocol\n");
376 		return -EINVAL;
377 	}
378 
379 	if (!proto->instance_init) {
380 		pr_err("missing init for protocol 0x%x\n", proto->id);
381 		return -EINVAL;
382 	}
383 
384 	if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
385 	    scmi_vendor_protocol_check(proto))
386 		return -EINVAL;
387 
388 	/*
389 	 * Calculate a protocol key to register this protocol with the core;
390 	 * key value 0 is considered invalid.
391 	 */
392 	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
393 					  proto->sub_vendor_id,
394 					  proto->impl_ver);
395 	if (!key)
396 		return -EINVAL;
397 
398 	ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
399 	if (ret) {
400 		pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
401 		       proto->id, ret);
402 		return ret;
403 	}
404 
405 	pr_debug("Registered SCMI Protocol 0x%x - %s  %s  0x%08X\n",
406 		 proto->id, proto->vendor_id, proto->sub_vendor_id,
407 		 proto->impl_ver);
408 
409 	return 0;
410 }
411 EXPORT_SYMBOL_GPL(scmi_protocol_register);
412 
scmi_protocol_unregister(const struct scmi_protocol * proto)413 void scmi_protocol_unregister(const struct scmi_protocol *proto)
414 {
415 	unsigned long key;
416 
417 	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
418 					  proto->sub_vendor_id,
419 					  proto->impl_ver);
420 	if (!key)
421 		return;
422 
423 	xa_erase(&scmi_protocols, key);
424 
425 	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
426 }
427 EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
428 
429 /**
430  * scmi_create_protocol_devices  - Create devices for all pending requests for
431  * this SCMI instance.
432  *
433  * @np: The device node describing the protocol
434  * @info: The SCMI instance descriptor
435  * @prot_id: The protocol ID
436  * @name: The optional name of the device to be created: if not provided this
437  *	  call will lead to the creation of all the devices currently requested
438  *	  for the specified protocol.
439  */
scmi_create_protocol_devices(struct device_node * np,struct scmi_info * info,int prot_id,const char * name)440 static void scmi_create_protocol_devices(struct device_node *np,
441 					 struct scmi_info *info,
442 					 int prot_id, const char *name)
443 {
444 	mutex_lock(&info->devreq_mtx);
445 	scmi_device_create(np, info->dev, prot_id, name);
446 	mutex_unlock(&info->devreq_mtx);
447 }
448 
scmi_destroy_protocol_devices(struct scmi_info * info,int prot_id,const char * name)449 static void scmi_destroy_protocol_devices(struct scmi_info *info,
450 					  int prot_id, const char *name)
451 {
452 	mutex_lock(&info->devreq_mtx);
453 	scmi_device_destroy(info->dev, prot_id, name);
454 	mutex_unlock(&info->devreq_mtx);
455 }
456 
scmi_notification_instance_data_set(const struct scmi_handle * handle,void * priv)457 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
458 					 void *priv)
459 {
460 	struct scmi_info *info = handle_to_scmi_info(handle);
461 
462 	info->notify_priv = priv;
463 	/* Ensure updated protocol private date are visible */
464 	smp_wmb();
465 }
466 
scmi_notification_instance_data_get(const struct scmi_handle * handle)467 void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
468 {
469 	struct scmi_info *info = handle_to_scmi_info(handle);
470 
471 	/* Ensure protocols_private_data has been updated */
472 	smp_rmb();
473 	return info->notify_priv;
474 }
475 
476 /**
477  * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
478  *
479  * @minfo: Pointer to Tx/Rx Message management info based on channel type
480  * @xfer: The xfer to act upon
481  *
482  * Pick the next unused monotonically increasing token and set it into
483  * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
484  * reuse of freshly completed or timed-out xfers, thus mitigating the risk
485  * of incorrect association of a late and expired xfer with a live in-flight
486  * transaction, both happening to re-use the same token identifier.
487  *
488  * Since platform is NOT required to answer our request in-order we should
489  * account for a few rare but possible scenarios:
490  *
491  *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
492  *    using find_next_zero_bit() starting from candidate next_token bit
493  *
494  *  - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
495  *    are plenty of free tokens at start, so try a second pass using
496  *    find_next_zero_bit() and starting from 0.
497  *
498  *  X = used in-flight
499  *
500  * Normal
501  * ------
502  *
503  *		|- xfer_id picked
504  *   -----------+----------------------------------------------------------
505  *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
506  *   ----------------------------------------------------------------------
507  *		^
508  *		|- next_token
509  *
510  * Out-of-order pending at start
511  * -----------------------------
512  *
513  *	  |- xfer_id picked, last_token fixed
514  *   -----+----------------------------------------------------------------
515  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
516  *   ----------------------------------------------------------------------
517  *    ^
518  *    |- next_token
519  *
520  *
521  * Out-of-order pending at end
522  * ---------------------------
523  *
524  *	  |- xfer_id picked, last_token fixed
525  *   -----+----------------------------------------------------------------
526  *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
527  *   ----------------------------------------------------------------------
528  *								^
529  *								|- next_token
530  *
531  * Context: Assumes to be called with @xfer_lock already acquired.
532  *
533  * Return: 0 on Success or error
534  */
scmi_xfer_token_set(struct scmi_xfers_info * minfo,struct scmi_xfer * xfer)535 static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
536 			       struct scmi_xfer *xfer)
537 {
538 	unsigned long xfer_id, next_token;
539 
540 	/*
541 	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
542 	 * using the pre-allocated transfer_id as a base.
543 	 * Note that the global transfer_id is shared across all message types
544 	 * so there could be holes in the allocated set of monotonic sequence
545 	 * numbers, but that is going to limit the effectiveness of the
546 	 * mitigation only in very rare limit conditions.
547 	 */
548 	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
549 
550 	/* Pick the next available xfer_id >= next_token */
551 	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
552 				     MSG_TOKEN_MAX, next_token);
553 	if (xfer_id == MSG_TOKEN_MAX) {
554 		/*
555 		 * After heavily out-of-order responses, there are no free
556 		 * tokens ahead, but only at start of xfer_alloc_table so
557 		 * try again from the beginning.
558 		 */
559 		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
560 					     MSG_TOKEN_MAX, 0);
561 		/*
562 		 * Something is wrong if we got here since there can be a
563 		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
564 		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
565 		 */
566 		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
567 			return -ENOMEM;
568 	}
569 
570 	/* Update +/- last_token accordingly if we skipped some hole */
571 	if (xfer_id != next_token)
572 		atomic_add((int)(xfer_id - next_token), &transfer_last_id);
573 
574 	xfer->hdr.seq = (u16)xfer_id;
575 
576 	return 0;
577 }
578 
579 /**
580  * scmi_xfer_token_clear  - Release the token
581  *
582  * @minfo: Pointer to Tx/Rx Message management info based on channel type
583  * @xfer: The xfer to act upon
584  */
scmi_xfer_token_clear(struct scmi_xfers_info * minfo,struct scmi_xfer * xfer)585 static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
586 					 struct scmi_xfer *xfer)
587 {
588 	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
589 }
590 
591 /**
592  * scmi_xfer_inflight_register_unlocked  - Register the xfer as in-flight
593  *
594  * @xfer: The xfer to register
595  * @minfo: Pointer to Tx/Rx Message management info based on channel type
596  *
597  * Note that this helper assumes that the xfer to be registered as in-flight
598  * had been built using an xfer sequence number which still corresponds to a
599  * free slot in the xfer_alloc_table.
600  *
601  * Context: Assumes to be called with @xfer_lock already acquired.
602  */
603 static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer * xfer,struct scmi_xfers_info * minfo)604 scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
605 				     struct scmi_xfers_info *minfo)
606 {
607 	/* In this context minfo will be tx_minfo due to the xfer pending */
608 	struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
609 
610 	/* Set in-flight */
611 	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
612 	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
613 	scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
614 
615 	xfer->pending = true;
616 }
617 
618 /**
619  * scmi_xfer_inflight_register  - Try to register an xfer as in-flight
620  *
621  * @xfer: The xfer to register
622  * @minfo: Pointer to Tx/Rx Message management info based on channel type
623  *
624  * Note that this helper does NOT assume anything about the sequence number
625  * that was baked into the provided xfer, so it checks at first if it can
626  * be mapped to a free slot and fails with an error if another xfer with the
627  * same sequence number is currently still registered as in-flight.
628  *
629  * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
630  *	   could not rbe mapped to a free slot in the xfer_alloc_table.
631  */
scmi_xfer_inflight_register(struct scmi_xfer * xfer,struct scmi_xfers_info * minfo)632 static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
633 				       struct scmi_xfers_info *minfo)
634 {
635 	int ret = 0;
636 	unsigned long flags;
637 
638 	spin_lock_irqsave(&minfo->xfer_lock, flags);
639 	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
640 		scmi_xfer_inflight_register_unlocked(xfer, minfo);
641 	else
642 		ret = -EBUSY;
643 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
644 
645 	return ret;
646 }
647 
648 /**
649  * scmi_xfer_raw_inflight_register  - An helper to register the given xfer as in
650  * flight on the TX channel, if possible.
651  *
652  * @handle: Pointer to SCMI entity handle
653  * @xfer: The xfer to register
654  *
655  * Return: 0 on Success, error otherwise
656  */
scmi_xfer_raw_inflight_register(const struct scmi_handle * handle,struct scmi_xfer * xfer)657 int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
658 				    struct scmi_xfer *xfer)
659 {
660 	struct scmi_info *info = handle_to_scmi_info(handle);
661 
662 	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
663 }
664 
665 /**
666  * scmi_xfer_pending_set  - Pick a proper sequence number and mark the xfer
667  * as pending in-flight
668  *
669  * @xfer: The xfer to act upon
670  * @minfo: Pointer to Tx/Rx Message management info based on channel type
671  *
672  * Return: 0 on Success or error otherwise
673  */
scmi_xfer_pending_set(struct scmi_xfer * xfer,struct scmi_xfers_info * minfo)674 static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
675 					struct scmi_xfers_info *minfo)
676 {
677 	int ret;
678 	unsigned long flags;
679 
680 	spin_lock_irqsave(&minfo->xfer_lock, flags);
681 	/* Set a new monotonic token as the xfer sequence number */
682 	ret = scmi_xfer_token_set(minfo, xfer);
683 	if (!ret)
684 		scmi_xfer_inflight_register_unlocked(xfer, minfo);
685 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
686 
687 	return ret;
688 }
689 
690 /**
691  * scmi_xfer_get() - Allocate one message
692  *
693  * @handle: Pointer to SCMI entity handle
694  * @minfo: Pointer to Tx/Rx Message management info based on channel type
695  *
696  * Helper function which is used by various message functions that are
697  * exposed to clients of this driver for allocating a message traffic event.
698  *
699  * Picks an xfer from the free list @free_xfers (if any available) and perform
700  * a basic initialization.
701  *
702  * Note that, at this point, still no sequence number is assigned to the
703  * allocated xfer, nor it is registered as a pending transaction.
704  *
705  * The successfully initialized xfer is refcounted.
706  *
707  * Context: Holds @xfer_lock while manipulating @free_xfers.
708  *
709  * Return: An initialized xfer if all went fine, else pointer error.
710  */
scmi_xfer_get(const struct scmi_handle * handle,struct scmi_xfers_info * minfo)711 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
712 				       struct scmi_xfers_info *minfo)
713 {
714 	unsigned long flags;
715 	struct scmi_xfer *xfer;
716 
717 	spin_lock_irqsave(&minfo->xfer_lock, flags);
718 	if (hlist_empty(&minfo->free_xfers)) {
719 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
720 		return ERR_PTR(-ENOMEM);
721 	}
722 
723 	/* grab an xfer from the free_list */
724 	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
725 	hlist_del_init(&xfer->node);
726 
727 	/*
728 	 * Allocate transfer_id early so that can be used also as base for
729 	 * monotonic sequence number generation if needed.
730 	 */
731 	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
732 
733 	refcount_set(&xfer->users, 1);
734 	atomic_set(&xfer->busy, SCMI_XFER_FREE);
735 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
736 
737 	return xfer;
738 }
739 
740 /**
741  * scmi_xfer_raw_get  - Helper to get a bare free xfer from the TX channel
742  *
743  * @handle: Pointer to SCMI entity handle
744  *
745  * Note that xfer is taken from the TX channel structures.
746  *
747  * Return: A valid xfer on Success, or an error-pointer otherwise
748  */
scmi_xfer_raw_get(const struct scmi_handle * handle)749 struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
750 {
751 	struct scmi_xfer *xfer;
752 	struct scmi_info *info = handle_to_scmi_info(handle);
753 
754 	xfer = scmi_xfer_get(handle, &info->tx_minfo);
755 	if (!IS_ERR(xfer))
756 		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
757 
758 	return xfer;
759 }
760 
761 /**
762  * scmi_xfer_raw_channel_get  - Helper to get a reference to the proper channel
763  * to use for a specific protocol_id Raw transaction.
764  *
765  * @handle: Pointer to SCMI entity handle
766  * @protocol_id: Identifier of the protocol
767  *
768  * Note that in a regular SCMI stack, usually, a protocol has to be defined in
769  * the DT to have an associated channel and be usable; but in Raw mode any
770  * protocol in range is allowed, re-using the Base channel, so as to enable
771  * fuzzing on any protocol without the need of a fully compiled DT.
772  *
773  * Return: A reference to the channel to use, or an ERR_PTR
774  */
775 struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle * handle,u8 protocol_id)776 scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
777 {
778 	struct scmi_chan_info *cinfo;
779 	struct scmi_info *info = handle_to_scmi_info(handle);
780 
781 	cinfo = idr_find(&info->tx_idr, protocol_id);
782 	if (!cinfo) {
783 		if (protocol_id == SCMI_PROTOCOL_BASE)
784 			return ERR_PTR(-EINVAL);
785 		/* Use Base channel for protocols not defined for DT */
786 		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
787 		if (!cinfo)
788 			return ERR_PTR(-EINVAL);
789 		dev_warn_once(handle->dev,
790 			      "Using Base channel for protocol 0x%X\n",
791 			      protocol_id);
792 	}
793 
794 	return cinfo;
795 }
796 
797 /**
798  * __scmi_xfer_put() - Release a message
799  *
800  * @minfo: Pointer to Tx/Rx Message management info based on channel type
801  * @xfer: message that was reserved by scmi_xfer_get
802  *
803  * After refcount check, possibly release an xfer, clearing the token slot,
804  * removing xfer from @pending_xfers and putting it back into free_xfers.
805  *
806  * This holds a spinlock to maintain integrity of internal data structures.
807  */
808 static void
__scmi_xfer_put(struct scmi_xfers_info * minfo,struct scmi_xfer * xfer)809 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
810 {
811 	unsigned long flags;
812 
813 	spin_lock_irqsave(&minfo->xfer_lock, flags);
814 	if (refcount_dec_and_test(&xfer->users)) {
815 		if (xfer->pending) {
816 			struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
817 
818 			scmi_xfer_token_clear(minfo, xfer);
819 			hash_del(&xfer->node);
820 			xfer->pending = false;
821 
822 			scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
823 		}
824 		hlist_add_head(&xfer->node, &minfo->free_xfers);
825 	}
826 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
827 }
828 
829 /**
830  * scmi_xfer_raw_put  - Release an xfer that was taken by @scmi_xfer_raw_get
831  *
832  * @handle: Pointer to SCMI entity handle
833  * @xfer: A reference to the xfer to put
834  *
835  * Note that as with other xfer_put() handlers the xfer is really effectively
836  * released only if there are no more users on the system.
837  */
scmi_xfer_raw_put(const struct scmi_handle * handle,struct scmi_xfer * xfer)838 void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
839 {
840 	struct scmi_info *info = handle_to_scmi_info(handle);
841 
842 	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
843 	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
844 	return __scmi_xfer_put(&info->tx_minfo, xfer);
845 }
846 
847 /**
848  * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
849  *
850  * @minfo: Pointer to Tx/Rx Message management info based on channel type
851  * @xfer_id: Token ID to lookup in @pending_xfers
852  *
853  * Refcounting is untouched.
854  *
855  * Context: Assumes to be called with @xfer_lock already acquired.
856  *
857  * Return: A valid xfer on Success or error otherwise
858  */
859 static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info * minfo,u16 xfer_id)860 scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
861 {
862 	struct scmi_xfer *xfer = NULL;
863 
864 	if (test_bit(xfer_id, minfo->xfer_alloc_table))
865 		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
866 
867 	return xfer ?: ERR_PTR(-EINVAL);
868 }
869 
870 /**
871  * scmi_bad_message_trace  - A helper to trace weird messages
872  *
873  * @cinfo: A reference to the channel descriptor on which the message was
874  *	   received
875  * @msg_hdr: Message header to track
876  * @err: A specific error code used as a status value in traces.
877  *
878  * This helper can be used to trace any kind of weird, incomplete, unexpected,
879  * timed-out message that arrives and as such, can be traced only referring to
880  * the header content, since the payload is missing/unreliable.
881  */
scmi_bad_message_trace(struct scmi_chan_info * cinfo,u32 msg_hdr,enum scmi_bad_msg err)882 static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
883 				   enum scmi_bad_msg err)
884 {
885 	char *tag;
886 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
887 
888 	switch (MSG_XTRACT_TYPE(msg_hdr)) {
889 	case MSG_TYPE_COMMAND:
890 		tag = "!RESP";
891 		break;
892 	case MSG_TYPE_DELAYED_RESP:
893 		tag = "!DLYD";
894 		break;
895 	case MSG_TYPE_NOTIFICATION:
896 		tag = "!NOTI";
897 		break;
898 	default:
899 		tag = "!UNKN";
900 		break;
901 	}
902 
903 	trace_scmi_msg_dump(info->id, cinfo->id,
904 			    MSG_XTRACT_PROT_ID(msg_hdr),
905 			    MSG_XTRACT_ID(msg_hdr), tag,
906 			    MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
907 }
908 
909 /**
910  * scmi_msg_response_validate  - Validate message type against state of related
911  * xfer
912  *
913  * @cinfo: A reference to the channel descriptor.
914  * @msg_type: Message type to check
915  * @xfer: A reference to the xfer to validate against @msg_type
916  *
917  * This function checks if @msg_type is congruent with the current state of
918  * a pending @xfer; if an asynchronous delayed response is received before the
919  * related synchronous response (Out-of-Order Delayed Response) the missing
920  * synchronous response is assumed to be OK and completed, carrying on with the
921  * Delayed Response: this is done to address the case in which the underlying
922  * SCMI transport can deliver such out-of-order responses.
923  *
924  * Context: Assumes to be called with xfer->lock already acquired.
925  *
926  * Return: 0 on Success, error otherwise
927  */
scmi_msg_response_validate(struct scmi_chan_info * cinfo,u8 msg_type,struct scmi_xfer * xfer)928 static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
929 					     u8 msg_type,
930 					     struct scmi_xfer *xfer)
931 {
932 	/*
933 	 * Even if a response was indeed expected on this slot at this point,
934 	 * a buggy platform could wrongly reply feeding us an unexpected
935 	 * delayed response we're not prepared to handle: bail-out safely
936 	 * blaming firmware.
937 	 */
938 	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
939 		dev_err(cinfo->dev,
940 			"Delayed Response for %d not expected! Buggy F/W ?\n",
941 			xfer->hdr.seq);
942 		return -EINVAL;
943 	}
944 
945 	switch (xfer->state) {
946 	case SCMI_XFER_SENT_OK:
947 		if (msg_type == MSG_TYPE_DELAYED_RESP) {
948 			/*
949 			 * Delayed Response expected but delivered earlier.
950 			 * Assume message RESPONSE was OK and skip state.
951 			 */
952 			xfer->hdr.status = SCMI_SUCCESS;
953 			xfer->state = SCMI_XFER_RESP_OK;
954 			complete(&xfer->done);
955 			dev_warn(cinfo->dev,
956 				 "Received valid OoO Delayed Response for %d\n",
957 				 xfer->hdr.seq);
958 		}
959 		break;
960 	case SCMI_XFER_RESP_OK:
961 		if (msg_type != MSG_TYPE_DELAYED_RESP)
962 			return -EINVAL;
963 		break;
964 	case SCMI_XFER_DRESP_OK:
965 		/* No further message expected once in SCMI_XFER_DRESP_OK */
966 		return -EINVAL;
967 	}
968 
969 	return 0;
970 }
971 
972 /**
973  * scmi_xfer_state_update  - Update xfer state
974  *
975  * @xfer: A reference to the xfer to update
976  * @msg_type: Type of message being processed.
977  *
978  * Note that this message is assumed to have been already successfully validated
979  * by @scmi_msg_response_validate(), so here we just update the state.
980  *
981  * Context: Assumes to be called on an xfer exclusively acquired using the
982  *	    busy flag.
983  */
scmi_xfer_state_update(struct scmi_xfer * xfer,u8 msg_type)984 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
985 {
986 	xfer->hdr.type = msg_type;
987 
988 	/* Unknown command types were already discarded earlier */
989 	if (xfer->hdr.type == MSG_TYPE_COMMAND)
990 		xfer->state = SCMI_XFER_RESP_OK;
991 	else
992 		xfer->state = SCMI_XFER_DRESP_OK;
993 }
994 
scmi_xfer_acquired(struct scmi_xfer * xfer)995 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
996 {
997 	int ret;
998 
999 	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
1000 
1001 	return ret == SCMI_XFER_FREE;
1002 }
1003 
1004 /**
1005  * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
1006  *
1007  * @cinfo: A reference to the channel descriptor.
1008  * @msg_hdr: A message header to use as lookup key
1009  *
1010  * When a valid xfer is found for the sequence number embedded in the provided
1011  * msg_hdr, reference counting is properly updated and exclusive access to this
1012  * xfer is granted till released with @scmi_xfer_command_release.
1013  *
1014  * Return: A valid @xfer on Success or error otherwise.
1015  */
1016 static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info * cinfo,u32 msg_hdr)1017 scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
1018 {
1019 	int ret;
1020 	unsigned long flags;
1021 	struct scmi_xfer *xfer;
1022 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1023 	struct scmi_xfers_info *minfo = &info->tx_minfo;
1024 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1025 	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
1026 
1027 	/* Are we even expecting this? */
1028 	spin_lock_irqsave(&minfo->xfer_lock, flags);
1029 	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
1030 	if (IS_ERR(xfer)) {
1031 		dev_err(cinfo->dev,
1032 			"Message for %d type %d is not expected!\n",
1033 			xfer_id, msg_type);
1034 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1035 
1036 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
1037 		scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
1038 
1039 		return xfer;
1040 	}
1041 	refcount_inc(&xfer->users);
1042 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1043 
1044 	spin_lock_irqsave(&xfer->lock, flags);
1045 	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
1046 	/*
1047 	 * If a pending xfer was found which was also in a congruent state with
1048 	 * the received message, acquire exclusive access to it setting the busy
1049 	 * flag.
1050 	 * Spins only on the rare limit condition of concurrent reception of
1051 	 * RESP and DRESP for the same xfer.
1052 	 */
1053 	if (!ret) {
1054 		spin_until_cond(scmi_xfer_acquired(xfer));
1055 		scmi_xfer_state_update(xfer, msg_type);
1056 	}
1057 	spin_unlock_irqrestore(&xfer->lock, flags);
1058 
1059 	if (ret) {
1060 		dev_err(cinfo->dev,
1061 			"Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
1062 			msg_type, xfer_id, msg_hdr, xfer->state);
1063 
1064 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
1065 		scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
1066 
1067 		/* On error the refcount incremented above has to be dropped */
1068 		__scmi_xfer_put(minfo, xfer);
1069 		xfer = ERR_PTR(-EINVAL);
1070 	}
1071 
1072 	return xfer;
1073 }
1074 
scmi_xfer_command_release(struct scmi_info * info,struct scmi_xfer * xfer)1075 static inline void scmi_xfer_command_release(struct scmi_info *info,
1076 					     struct scmi_xfer *xfer)
1077 {
1078 	atomic_set(&xfer->busy, SCMI_XFER_FREE);
1079 	__scmi_xfer_put(&info->tx_minfo, xfer);
1080 }
1081 
scmi_clear_channel(struct scmi_info * info,struct scmi_chan_info * cinfo)1082 static inline void scmi_clear_channel(struct scmi_info *info,
1083 				      struct scmi_chan_info *cinfo)
1084 {
1085 	if (!cinfo->is_p2a) {
1086 		dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
1087 		return;
1088 	}
1089 
1090 	if (info->desc->ops->clear_channel)
1091 		info->desc->ops->clear_channel(cinfo);
1092 }
1093 
scmi_handle_notification(struct scmi_chan_info * cinfo,u32 msg_hdr,void * priv)1094 static void scmi_handle_notification(struct scmi_chan_info *cinfo,
1095 				     u32 msg_hdr, void *priv)
1096 {
1097 	struct scmi_xfer *xfer;
1098 	struct device *dev = cinfo->dev;
1099 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1100 	struct scmi_xfers_info *minfo = &info->rx_minfo;
1101 	ktime_t ts;
1102 
1103 	ts = ktime_get_boottime();
1104 	xfer = scmi_xfer_get(cinfo->handle, minfo);
1105 	if (IS_ERR(xfer)) {
1106 		dev_err(dev, "failed to get free message slot (%ld)\n",
1107 			PTR_ERR(xfer));
1108 
1109 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
1110 		scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
1111 
1112 		scmi_clear_channel(info, cinfo);
1113 		return;
1114 	}
1115 
1116 	unpack_scmi_header(msg_hdr, &xfer->hdr);
1117 	if (priv)
1118 		/* Ensure order between xfer->priv store and following ops */
1119 		smp_store_mb(xfer->priv, priv);
1120 	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
1121 					    xfer);
1122 
1123 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1124 			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
1125 			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
1126 	scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
1127 
1128 	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
1129 		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
1130 
1131 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1132 			   xfer->hdr.protocol_id, xfer->hdr.seq,
1133 			   MSG_TYPE_NOTIFICATION);
1134 
1135 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1136 		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
1137 		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
1138 					cinfo->id);
1139 	}
1140 
1141 	__scmi_xfer_put(minfo, xfer);
1142 
1143 	scmi_clear_channel(info, cinfo);
1144 }
1145 
scmi_handle_response(struct scmi_chan_info * cinfo,u32 msg_hdr,void * priv)1146 static void scmi_handle_response(struct scmi_chan_info *cinfo,
1147 				 u32 msg_hdr, void *priv)
1148 {
1149 	struct scmi_xfer *xfer;
1150 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1151 
1152 	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
1153 	if (IS_ERR(xfer)) {
1154 		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
1155 			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
1156 
1157 		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
1158 			scmi_clear_channel(info, cinfo);
1159 		return;
1160 	}
1161 
1162 	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
1163 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1164 		xfer->rx.len = info->desc->max_msg_size;
1165 
1166 	if (priv)
1167 		/* Ensure order between xfer->priv store and following ops */
1168 		smp_store_mb(xfer->priv, priv);
1169 	info->desc->ops->fetch_response(cinfo, xfer);
1170 
1171 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1172 			    xfer->hdr.id,
1173 			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
1174 			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
1175 			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
1176 			    xfer->hdr.seq, xfer->hdr.status,
1177 			    xfer->rx.buf, xfer->rx.len);
1178 
1179 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1180 			   xfer->hdr.protocol_id, xfer->hdr.seq,
1181 			   xfer->hdr.type);
1182 
1183 	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
1184 		scmi_clear_channel(info, cinfo);
1185 		complete(xfer->async_done);
1186 		scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
1187 	} else {
1188 		complete(&xfer->done);
1189 		scmi_inc_count(info->dbg->counters, RESPONSE_OK);
1190 	}
1191 
1192 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1193 		/*
1194 		 * When in polling mode avoid to queue the Raw xfer on the IRQ
1195 		 * RX path since it will be already queued at the end of the TX
1196 		 * poll loop.
1197 		 */
1198 		if (!xfer->hdr.poll_completion ||
1199 		    xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1200 			scmi_raw_message_report(info->raw, xfer,
1201 						SCMI_RAW_REPLY_QUEUE,
1202 						cinfo->id);
1203 	}
1204 
1205 	scmi_xfer_command_release(info, xfer);
1206 }
1207 
1208 /**
1209  * scmi_rx_callback() - callback for receiving messages
1210  *
1211  * @cinfo: SCMI channel info
1212  * @msg_hdr: Message header
1213  * @priv: Transport specific private data.
1214  *
1215  * Processes one received message to appropriate transfer information and
1216  * signals completion of the transfer.
1217  *
1218  * NOTE: This function will be invoked in IRQ context, hence should be
1219  * as optimal as possible.
1220  */
scmi_rx_callback(struct scmi_chan_info * cinfo,u32 msg_hdr,void * priv)1221 static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
1222 			     void *priv)
1223 {
1224 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1225 
1226 	switch (msg_type) {
1227 	case MSG_TYPE_NOTIFICATION:
1228 		scmi_handle_notification(cinfo, msg_hdr, priv);
1229 		break;
1230 	case MSG_TYPE_COMMAND:
1231 	case MSG_TYPE_DELAYED_RESP:
1232 		scmi_handle_response(cinfo, msg_hdr, priv);
1233 		break;
1234 	default:
1235 		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
1236 		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
1237 		break;
1238 	}
1239 }
1240 
1241 /**
1242  * xfer_put() - Release a transmit message
1243  *
1244  * @ph: Pointer to SCMI protocol handle
1245  * @xfer: message that was reserved by xfer_get_init
1246  */
xfer_put(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)1247 static void xfer_put(const struct scmi_protocol_handle *ph,
1248 		     struct scmi_xfer *xfer)
1249 {
1250 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1251 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1252 
1253 	__scmi_xfer_put(&info->tx_minfo, xfer);
1254 }
1255 
scmi_xfer_done_no_timeout(struct scmi_chan_info * cinfo,struct scmi_xfer * xfer,ktime_t stop,bool * ooo)1256 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
1257 				      struct scmi_xfer *xfer, ktime_t stop,
1258 				      bool *ooo)
1259 {
1260 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1261 
1262 	/*
1263 	 * Poll also on xfer->done so that polling can be forcibly terminated
1264 	 * in case of out-of-order receptions of delayed responses
1265 	 */
1266 	return info->desc->ops->poll_done(cinfo, xfer) ||
1267 	       (*ooo = try_wait_for_completion(&xfer->done)) ||
1268 	       ktime_after(ktime_get(), stop);
1269 }
1270 
scmi_wait_for_reply(struct device * dev,const struct scmi_desc * desc,struct scmi_chan_info * cinfo,struct scmi_xfer * xfer,unsigned int timeout_ms)1271 static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
1272 			       struct scmi_chan_info *cinfo,
1273 			       struct scmi_xfer *xfer, unsigned int timeout_ms)
1274 {
1275 	int ret = 0;
1276 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1277 
1278 	if (xfer->hdr.poll_completion) {
1279 		/*
1280 		 * Real polling is needed only if transport has NOT declared
1281 		 * itself to support synchronous commands replies.
1282 		 */
1283 		if (!desc->sync_cmds_completed_on_ret) {
1284 			bool ooo = false;
1285 
1286 			/*
1287 			 * Poll on xfer using transport provided .poll_done();
1288 			 * assumes no completion interrupt was available.
1289 			 */
1290 			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
1291 
1292 			spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
1293 								  stop, &ooo));
1294 			if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
1295 				dev_err(dev,
1296 					"timed out in resp(caller: %pS) - polling\n",
1297 					(void *)_RET_IP_);
1298 				ret = -ETIMEDOUT;
1299 				scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
1300 			}
1301 		}
1302 
1303 		if (!ret) {
1304 			unsigned long flags;
1305 
1306 			/*
1307 			 * Do not fetch_response if an out-of-order delayed
1308 			 * response is being processed.
1309 			 */
1310 			spin_lock_irqsave(&xfer->lock, flags);
1311 			if (xfer->state == SCMI_XFER_SENT_OK) {
1312 				desc->ops->fetch_response(cinfo, xfer);
1313 				xfer->state = SCMI_XFER_RESP_OK;
1314 			}
1315 			spin_unlock_irqrestore(&xfer->lock, flags);
1316 
1317 			/* Trace polled replies. */
1318 			trace_scmi_msg_dump(info->id, cinfo->id,
1319 					    xfer->hdr.protocol_id, xfer->hdr.id,
1320 					    !SCMI_XFER_IS_RAW(xfer) ?
1321 					    "RESP" : "resp",
1322 					    xfer->hdr.seq, xfer->hdr.status,
1323 					    xfer->rx.buf, xfer->rx.len);
1324 			scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
1325 
1326 			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1327 				scmi_raw_message_report(info->raw, xfer,
1328 							SCMI_RAW_REPLY_QUEUE,
1329 							cinfo->id);
1330 			}
1331 		}
1332 	} else {
1333 		/* And we wait for the response. */
1334 		if (!wait_for_completion_timeout(&xfer->done,
1335 						 msecs_to_jiffies(timeout_ms))) {
1336 			dev_err(dev, "timed out in resp(caller: %pS)\n",
1337 				(void *)_RET_IP_);
1338 			ret = -ETIMEDOUT;
1339 			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
1340 		}
1341 	}
1342 
1343 	return ret;
1344 }
1345 
1346 /**
1347  * scmi_wait_for_message_response  - An helper to group all the possible ways of
1348  * waiting for a synchronous message response.
1349  *
1350  * @cinfo: SCMI channel info
1351  * @xfer: Reference to the transfer being waited for.
1352  *
1353  * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1354  * configuration flags like xfer->hdr.poll_completion.
1355  *
1356  * Return: 0 on Success, error otherwise.
1357  */
scmi_wait_for_message_response(struct scmi_chan_info * cinfo,struct scmi_xfer * xfer)1358 static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
1359 					  struct scmi_xfer *xfer)
1360 {
1361 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1362 	struct device *dev = info->dev;
1363 
1364 	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1365 				      xfer->hdr.protocol_id, xfer->hdr.seq,
1366 				      info->desc->max_rx_timeout_ms,
1367 				      xfer->hdr.poll_completion);
1368 
1369 	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1370 				   info->desc->max_rx_timeout_ms);
1371 }
1372 
1373 /**
1374  * scmi_xfer_raw_wait_for_message_response  - An helper to wait for a message
1375  * reply to an xfer raw request on a specific channel for the required timeout.
1376  *
1377  * @cinfo: SCMI channel info
1378  * @xfer: Reference to the transfer being waited for.
1379  * @timeout_ms: The maximum timeout in milliseconds
1380  *
1381  * Return: 0 on Success, error otherwise.
1382  */
scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info * cinfo,struct scmi_xfer * xfer,unsigned int timeout_ms)1383 int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
1384 					    struct scmi_xfer *xfer,
1385 					    unsigned int timeout_ms)
1386 {
1387 	int ret;
1388 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1389 	struct device *dev = info->dev;
1390 
1391 	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1392 	if (ret)
1393 		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1394 			pack_scmi_header(&xfer->hdr));
1395 
1396 	return ret;
1397 }
1398 
1399 /**
1400  * do_xfer() - Do one transfer
1401  *
1402  * @ph: Pointer to SCMI protocol handle
1403  * @xfer: Transfer to initiate and wait for response
1404  *
1405  * Return: -ETIMEDOUT in case of no response, if transmit error,
1406  *	return corresponding error, else if all goes well,
1407  *	return 0.
1408  */
do_xfer(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)1409 static int do_xfer(const struct scmi_protocol_handle *ph,
1410 		   struct scmi_xfer *xfer)
1411 {
1412 	int ret;
1413 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1414 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1415 	struct device *dev = info->dev;
1416 	struct scmi_chan_info *cinfo;
1417 
1418 	/* Check for polling request on custom command xfers at first */
1419 	if (xfer->hdr.poll_completion &&
1420 	    !is_transport_polling_capable(info->desc)) {
1421 		dev_warn_once(dev,
1422 			      "Polling mode is not supported by transport.\n");
1423 		scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
1424 		return -EINVAL;
1425 	}
1426 
1427 	cinfo = idr_find(&info->tx_idr, pi->proto->id);
1428 	if (unlikely(!cinfo)) {
1429 		scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
1430 		return -EINVAL;
1431 	}
1432 	/* True ONLY if also supported by transport. */
1433 	if (is_polling_enabled(cinfo, info->desc))
1434 		xfer->hdr.poll_completion = true;
1435 
1436 	/*
1437 	 * Initialise protocol id now from protocol handle to avoid it being
1438 	 * overridden by mistake (or malice) by the protocol code mangling with
1439 	 * the scmi_xfer structure prior to this.
1440 	 */
1441 	xfer->hdr.protocol_id = pi->proto->id;
1442 	reinit_completion(&xfer->done);
1443 
1444 	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1445 			      xfer->hdr.protocol_id, xfer->hdr.seq,
1446 			      xfer->hdr.poll_completion,
1447 			      scmi_inflight_count(&info->handle));
1448 
1449 	/* Clear any stale status */
1450 	xfer->hdr.status = SCMI_SUCCESS;
1451 	xfer->state = SCMI_XFER_SENT_OK;
1452 	/*
1453 	 * Even though spinlocking is not needed here since no race is possible
1454 	 * on xfer->state due to the monotonically increasing tokens allocation,
1455 	 * we must anyway ensure xfer->state initialization is not re-ordered
1456 	 * after the .send_message() to be sure that on the RX path an early
1457 	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1458 	 */
1459 	smp_mb();
1460 
1461 	ret = info->desc->ops->send_message(cinfo, xfer);
1462 	if (ret < 0) {
1463 		dev_dbg(dev, "Failed to send message %d\n", ret);
1464 		scmi_inc_count(info->dbg->counters, SENT_FAIL);
1465 		return ret;
1466 	}
1467 
1468 	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1469 			    xfer->hdr.id, "CMND", xfer->hdr.seq,
1470 			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1471 	scmi_inc_count(info->dbg->counters, SENT_OK);
1472 
1473 	ret = scmi_wait_for_message_response(cinfo, xfer);
1474 	if (!ret && xfer->hdr.status) {
1475 		ret = scmi_to_linux_errno(xfer->hdr.status);
1476 		scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
1477 	}
1478 
1479 	if (info->desc->ops->mark_txdone)
1480 		info->desc->ops->mark_txdone(cinfo, ret, xfer);
1481 
1482 	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1483 			    xfer->hdr.protocol_id, xfer->hdr.seq, ret,
1484 			    scmi_inflight_count(&info->handle));
1485 
1486 	return ret;
1487 }
1488 
reset_rx_to_maxsz(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)1489 static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
1490 			      struct scmi_xfer *xfer)
1491 {
1492 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1493 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1494 
1495 	xfer->rx.len = info->desc->max_msg_size;
1496 }
1497 
1498 /**
1499  * do_xfer_with_response() - Do one transfer and wait until the delayed
1500  *	response is received
1501  *
1502  * @ph: Pointer to SCMI protocol handle
1503  * @xfer: Transfer to initiate and wait for response
1504  *
1505  * Using asynchronous commands in atomic/polling mode should be avoided since
1506  * it could cause long busy-waiting here, so ignore polling for the delayed
1507  * response and WARN if it was requested for this command transaction since
1508  * upper layers should refrain from issuing such kind of requests.
1509  *
1510  * The only other option would have been to refrain from using any asynchronous
1511  * command even if made available, when an atomic transport is detected, and
1512  * instead forcibly use the synchronous version (thing that can be easily
1513  * attained at the protocol layer), but this would also have led to longer
1514  * stalls of the channel for synchronous commands and possibly timeouts.
1515  * (in other words there is usually a good reason if a platform provides an
1516  *  asynchronous version of a command and we should prefer to use it...just not
1517  *  when using atomic/polling mode)
1518  *
1519  * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1520  *	return corresponding error, else if all goes well, return 0.
1521  */
do_xfer_with_response(const struct scmi_protocol_handle * ph,struct scmi_xfer * xfer)1522 static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
1523 				 struct scmi_xfer *xfer)
1524 {
1525 	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
1526 	DECLARE_COMPLETION_ONSTACK(async_response);
1527 
1528 	xfer->async_done = &async_response;
1529 
1530 	/*
1531 	 * Delayed responses should not be polled, so an async command should
1532 	 * not have been used when requiring an atomic/poll context; WARN and
1533 	 * perform instead a sleeping wait.
1534 	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
1535 	 */
1536 	WARN_ON_ONCE(xfer->hdr.poll_completion);
1537 
1538 	ret = do_xfer(ph, xfer);
1539 	if (!ret) {
1540 		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1541 			dev_err(ph->dev,
1542 				"timed out in delayed resp(caller: %pS)\n",
1543 				(void *)_RET_IP_);
1544 			ret = -ETIMEDOUT;
1545 		} else if (xfer->hdr.status) {
1546 			ret = scmi_to_linux_errno(xfer->hdr.status);
1547 		}
1548 	}
1549 
1550 	xfer->async_done = NULL;
1551 	return ret;
1552 }
1553 
1554 /**
1555  * xfer_get_init() - Allocate and initialise one message for transmit
1556  *
1557  * @ph: Pointer to SCMI protocol handle
1558  * @msg_id: Message identifier
1559  * @tx_size: transmit message size
1560  * @rx_size: receive message size
1561  * @p: pointer to the allocated and initialised message
1562  *
1563  * This function allocates the message using @scmi_xfer_get and
1564  * initialise the header.
1565  *
1566  * Return: 0 if all went fine with @p pointing to message, else
1567  *	corresponding error.
1568  */
xfer_get_init(const struct scmi_protocol_handle * ph,u8 msg_id,size_t tx_size,size_t rx_size,struct scmi_xfer ** p)1569 static int xfer_get_init(const struct scmi_protocol_handle *ph,
1570 			 u8 msg_id, size_t tx_size, size_t rx_size,
1571 			 struct scmi_xfer **p)
1572 {
1573 	int ret;
1574 	struct scmi_xfer *xfer;
1575 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1576 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1577 	struct scmi_xfers_info *minfo = &info->tx_minfo;
1578 	struct device *dev = info->dev;
1579 
1580 	/* Ensure we have sane transfer sizes */
1581 	if (rx_size > info->desc->max_msg_size ||
1582 	    tx_size > info->desc->max_msg_size)
1583 		return -ERANGE;
1584 
1585 	xfer = scmi_xfer_get(pi->handle, minfo);
1586 	if (IS_ERR(xfer)) {
1587 		ret = PTR_ERR(xfer);
1588 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
1589 		return ret;
1590 	}
1591 
1592 	/* Pick a sequence number and register this xfer as in-flight */
1593 	ret = scmi_xfer_pending_set(xfer, minfo);
1594 	if (ret) {
1595 		dev_err(pi->handle->dev,
1596 			"Failed to get monotonic token %d\n", ret);
1597 		__scmi_xfer_put(minfo, xfer);
1598 		return ret;
1599 	}
1600 
1601 	xfer->tx.len = tx_size;
1602 	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1603 	xfer->hdr.type = MSG_TYPE_COMMAND;
1604 	xfer->hdr.id = msg_id;
1605 	xfer->hdr.poll_completion = false;
1606 
1607 	*p = xfer;
1608 
1609 	return 0;
1610 }
1611 
1612 /**
1613  * version_get() - command to get the revision of the SCMI entity
1614  *
1615  * @ph: Pointer to SCMI protocol handle
1616  * @version: Holds returned version of protocol.
1617  *
1618  * Updates the SCMI information in the internal data structure.
1619  *
1620  * Return: 0 if all went fine, else return appropriate error.
1621  */
version_get(const struct scmi_protocol_handle * ph,u32 * version)1622 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1623 {
1624 	int ret;
1625 	__le32 *rev_info;
1626 	struct scmi_xfer *t;
1627 
1628 	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1629 	if (ret)
1630 		return ret;
1631 
1632 	ret = do_xfer(ph, t);
1633 	if (!ret) {
1634 		rev_info = t->rx.buf;
1635 		*version = le32_to_cpu(*rev_info);
1636 	}
1637 
1638 	xfer_put(ph, t);
1639 	return ret;
1640 }
1641 
1642 /**
1643  * scmi_set_protocol_priv  - Set protocol specific data at init time
1644  *
1645  * @ph: A reference to the protocol handle.
1646  * @priv: The private data to set.
1647  * @version: The detected protocol version for the core to register.
1648  *
1649  * Return: 0 on Success
1650  */
scmi_set_protocol_priv(const struct scmi_protocol_handle * ph,void * priv,u32 version)1651 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1652 				  void *priv, u32 version)
1653 {
1654 	struct scmi_protocol_instance *pi = ph_to_pi(ph);
1655 
1656 	pi->priv = priv;
1657 	pi->version = version;
1658 
1659 	return 0;
1660 }
1661 
1662 /**
1663  * scmi_get_protocol_priv  - Set protocol specific data at init time
1664  *
1665  * @ph: A reference to the protocol handle.
1666  *
1667  * Return: Protocol private data if any was set.
1668  */
scmi_get_protocol_priv(const struct scmi_protocol_handle * ph)1669 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1670 {
1671 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1672 
1673 	return pi->priv;
1674 }
1675 
1676 static const struct scmi_xfer_ops xfer_ops = {
1677 	.version_get = version_get,
1678 	.xfer_get_init = xfer_get_init,
1679 	.reset_rx_to_maxsz = reset_rx_to_maxsz,
1680 	.do_xfer = do_xfer,
1681 	.do_xfer_with_response = do_xfer_with_response,
1682 	.xfer_put = xfer_put,
1683 };
1684 
1685 struct scmi_msg_resp_domain_name_get {
1686 	__le32 flags;
1687 	u8 name[SCMI_MAX_STR_SIZE];
1688 };
1689 
1690 /**
1691  * scmi_common_extended_name_get  - Common helper to get extended resources name
1692  * @ph: A protocol handle reference.
1693  * @cmd_id: The specific command ID to use.
1694  * @res_id: The specific resource ID to use.
1695  * @flags: A pointer to specific flags to use, if any.
1696  * @name: A pointer to the preallocated area where the retrieved name will be
1697  *	  stored as a NULL terminated string.
1698  * @len: The len in bytes of the @name char array.
1699  *
1700  * Return: 0 on Succcess
1701  */
scmi_common_extended_name_get(const struct scmi_protocol_handle * ph,u8 cmd_id,u32 res_id,u32 * flags,char * name,size_t len)1702 static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1703 					 u8 cmd_id, u32 res_id, u32 *flags,
1704 					 char *name, size_t len)
1705 {
1706 	int ret;
1707 	size_t txlen;
1708 	struct scmi_xfer *t;
1709 	struct scmi_msg_resp_domain_name_get *resp;
1710 
1711 	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
1712 	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1713 	if (ret)
1714 		goto out;
1715 
1716 	put_unaligned_le32(res_id, t->tx.buf);
1717 	if (flags)
1718 		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1719 	resp = t->rx.buf;
1720 
1721 	ret = ph->xops->do_xfer(ph, t);
1722 	if (!ret)
1723 		strscpy(name, resp->name, len);
1724 
1725 	ph->xops->xfer_put(ph, t);
1726 out:
1727 	if (ret)
1728 		dev_warn(ph->dev,
1729 			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1730 			 res_id, ret, name);
1731 	return ret;
1732 }
1733 
1734 /**
1735  * scmi_common_get_max_msg_size  - Get maximum message size
1736  * @ph: A protocol handle reference.
1737  *
1738  * Return: Maximum message size for the current protocol.
1739  */
scmi_common_get_max_msg_size(const struct scmi_protocol_handle * ph)1740 static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
1741 {
1742 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1743 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
1744 
1745 	return info->desc->max_msg_size;
1746 }
1747 
1748 /**
1749  * scmi_protocol_msg_check  - Check protocol message attributes
1750  *
1751  * @ph: A reference to the protocol handle.
1752  * @message_id: The ID of the message to check.
1753  * @attributes: A parameter to optionally return the retrieved message
1754  *		attributes, in case of Success.
1755  *
1756  * An helper to check protocol message attributes for a specific protocol
1757  * and message pair.
1758  *
1759  * Return: 0 on SUCCESS
1760  */
scmi_protocol_msg_check(const struct scmi_protocol_handle * ph,u32 message_id,u32 * attributes)1761 static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1762 				   u32 message_id, u32 *attributes)
1763 {
1764 	int ret;
1765 	struct scmi_xfer *t;
1766 
1767 	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1768 			    sizeof(__le32), 0, &t);
1769 	if (ret)
1770 		return ret;
1771 
1772 	put_unaligned_le32(message_id, t->tx.buf);
1773 	ret = do_xfer(ph, t);
1774 	if (!ret && attributes)
1775 		*attributes = get_unaligned_le32(t->rx.buf);
1776 	xfer_put(ph, t);
1777 
1778 	return ret;
1779 }
1780 
1781 /**
1782  * struct scmi_iterator  - Iterator descriptor
1783  * @msg: A reference to the message TX buffer; filled by @prepare_message with
1784  *	 a proper custom command payload for each multi-part command request.
1785  * @resp: A reference to the response RX buffer; used by @update_state and
1786  *	  @process_response to parse the multi-part replies.
1787  * @t: A reference to the underlying xfer initialized and used transparently by
1788  *     the iterator internal routines.
1789  * @ph: A reference to the associated protocol handle to be used.
1790  * @ops: A reference to the custom provided iterator operations.
1791  * @state: The current iterator state; used and updated in turn by the iterators
1792  *	   internal routines and by the caller-provided @scmi_iterator_ops.
1793  * @priv: A reference to optional private data as provided by the caller and
1794  *	  passed back to the @@scmi_iterator_ops.
1795  */
1796 struct scmi_iterator {
1797 	void *msg;
1798 	void *resp;
1799 	struct scmi_xfer *t;
1800 	const struct scmi_protocol_handle *ph;
1801 	struct scmi_iterator_ops *ops;
1802 	struct scmi_iterator_state state;
1803 	void *priv;
1804 };
1805 
scmi_iterator_init(const struct scmi_protocol_handle * ph,struct scmi_iterator_ops * ops,unsigned int max_resources,u8 msg_id,size_t tx_size,void * priv)1806 static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1807 				struct scmi_iterator_ops *ops,
1808 				unsigned int max_resources, u8 msg_id,
1809 				size_t tx_size, void *priv)
1810 {
1811 	int ret;
1812 	struct scmi_iterator *i;
1813 
1814 	i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1815 	if (!i)
1816 		return ERR_PTR(-ENOMEM);
1817 
1818 	i->ph = ph;
1819 	i->ops = ops;
1820 	i->priv = priv;
1821 
1822 	ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1823 	if (ret) {
1824 		devm_kfree(ph->dev, i);
1825 		return ERR_PTR(ret);
1826 	}
1827 
1828 	i->state.max_resources = max_resources;
1829 	i->msg = i->t->tx.buf;
1830 	i->resp = i->t->rx.buf;
1831 
1832 	return i;
1833 }
1834 
scmi_iterator_run(void * iter)1835 static int scmi_iterator_run(void *iter)
1836 {
1837 	int ret = -EINVAL;
1838 	struct scmi_iterator_ops *iops;
1839 	const struct scmi_protocol_handle *ph;
1840 	struct scmi_iterator_state *st;
1841 	struct scmi_iterator *i = iter;
1842 
1843 	if (!i || !i->ops || !i->ph)
1844 		return ret;
1845 
1846 	iops = i->ops;
1847 	ph = i->ph;
1848 	st = &i->state;
1849 
1850 	do {
1851 		iops->prepare_message(i->msg, st->desc_index, i->priv);
1852 		ret = ph->xops->do_xfer(ph, i->t);
1853 		if (ret)
1854 			break;
1855 
1856 		st->rx_len = i->t->rx.len;
1857 		ret = iops->update_state(st, i->resp, i->priv);
1858 		if (ret)
1859 			break;
1860 
1861 		if (st->num_returned > st->max_resources - st->desc_index) {
1862 			dev_err(ph->dev,
1863 				"No. of resources can't exceed %d\n",
1864 				st->max_resources);
1865 			ret = -EINVAL;
1866 			break;
1867 		}
1868 
1869 		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1870 		     st->loop_idx++) {
1871 			ret = iops->process_response(ph, i->resp, st, i->priv);
1872 			if (ret)
1873 				goto out;
1874 		}
1875 
1876 		st->desc_index += st->num_returned;
1877 		ph->xops->reset_rx_to_maxsz(ph, i->t);
1878 		/*
1879 		 * check for both returned and remaining to avoid infinite
1880 		 * loop due to buggy firmware
1881 		 */
1882 	} while (st->num_returned && st->num_remaining);
1883 
1884 out:
1885 	/* Finalize and destroy iterator */
1886 	ph->xops->xfer_put(ph, i->t);
1887 	devm_kfree(ph->dev, i);
1888 
1889 	return ret;
1890 }
1891 
1892 struct scmi_msg_get_fc_info {
1893 	__le32 domain;
1894 	__le32 message_id;
1895 };
1896 
1897 struct scmi_msg_resp_desc_fc {
1898 	__le32 attr;
1899 #define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
1900 #define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
1901 	__le32 rate_limit;
1902 	__le32 chan_addr_low;
1903 	__le32 chan_addr_high;
1904 	__le32 chan_size;
1905 	__le32 db_addr_low;
1906 	__le32 db_addr_high;
1907 	__le32 db_set_lmask;
1908 	__le32 db_set_hmask;
1909 	__le32 db_preserve_lmask;
1910 	__le32 db_preserve_hmask;
1911 };
1912 
1913 #define QUIRK_PERF_FC_FORCE						\
1914 	({								\
1915 		if (pi->proto->id == SCMI_PROTOCOL_PERF &&		\
1916 		    message_id == 0x8 /* PERF_LEVEL_GET */)		\
1917 			attributes |= BIT(0);				\
1918 	})
1919 
1920 static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle * ph,u8 describe_id,u32 message_id,u32 valid_size,u32 domain,void __iomem ** p_addr,struct scmi_fc_db_info ** p_db,u32 * rate_limit)1921 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1922 			     u8 describe_id, u32 message_id, u32 valid_size,
1923 			     u32 domain, void __iomem **p_addr,
1924 			     struct scmi_fc_db_info **p_db, u32 *rate_limit)
1925 {
1926 	int ret;
1927 	u32 flags;
1928 	u64 phys_addr;
1929 	u32 attributes;
1930 	u8 size;
1931 	void __iomem *addr;
1932 	struct scmi_xfer *t;
1933 	struct scmi_fc_db_info *db = NULL;
1934 	struct scmi_msg_get_fc_info *info;
1935 	struct scmi_msg_resp_desc_fc *resp;
1936 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1937 
1938 	/* Check if the MSG_ID supports fastchannel */
1939 	ret = scmi_protocol_msg_check(ph, message_id, &attributes);
1940 	SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
1941 	if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
1942 		dev_dbg(ph->dev,
1943 			"Skip FC init for 0x%02X/%d  domain:%d - ret:%d\n",
1944 			pi->proto->id, message_id, domain, ret);
1945 		return;
1946 	}
1947 
1948 	if (!p_addr) {
1949 		ret = -EINVAL;
1950 		goto err_out;
1951 	}
1952 
1953 	ret = ph->xops->xfer_get_init(ph, describe_id,
1954 				      sizeof(*info), sizeof(*resp), &t);
1955 	if (ret)
1956 		goto err_out;
1957 
1958 	info = t->tx.buf;
1959 	info->domain = cpu_to_le32(domain);
1960 	info->message_id = cpu_to_le32(message_id);
1961 
1962 	/*
1963 	 * Bail out on error leaving fc_info addresses zeroed; this includes
1964 	 * the case in which the requested domain/message_id does NOT support
1965 	 * fastchannels at all.
1966 	 */
1967 	ret = ph->xops->do_xfer(ph, t);
1968 	if (ret)
1969 		goto err_xfer;
1970 
1971 	resp = t->rx.buf;
1972 	flags = le32_to_cpu(resp->attr);
1973 	size = le32_to_cpu(resp->chan_size);
1974 	if (size != valid_size) {
1975 		ret = -EINVAL;
1976 		goto err_xfer;
1977 	}
1978 
1979 	if (rate_limit)
1980 		*rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1981 
1982 	phys_addr = le32_to_cpu(resp->chan_addr_low);
1983 	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1984 	addr = devm_ioremap(ph->dev, phys_addr, size);
1985 	if (!addr) {
1986 		ret = -EADDRNOTAVAIL;
1987 		goto err_xfer;
1988 	}
1989 
1990 	*p_addr = addr;
1991 
1992 	if (p_db && SUPPORTS_DOORBELL(flags)) {
1993 		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1994 		if (!db) {
1995 			ret = -ENOMEM;
1996 			goto err_db;
1997 		}
1998 
1999 		size = 1 << DOORBELL_REG_WIDTH(flags);
2000 		phys_addr = le32_to_cpu(resp->db_addr_low);
2001 		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
2002 		addr = devm_ioremap(ph->dev, phys_addr, size);
2003 		if (!addr) {
2004 			ret = -EADDRNOTAVAIL;
2005 			goto err_db_mem;
2006 		}
2007 
2008 		db->addr = addr;
2009 		db->width = size;
2010 		db->set = le32_to_cpu(resp->db_set_lmask);
2011 		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
2012 		db->mask = le32_to_cpu(resp->db_preserve_lmask);
2013 		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
2014 
2015 		*p_db = db;
2016 	}
2017 
2018 	ph->xops->xfer_put(ph, t);
2019 
2020 	dev_dbg(ph->dev,
2021 		"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
2022 		pi->proto->id, message_id, domain);
2023 
2024 	return;
2025 
2026 err_db_mem:
2027 	devm_kfree(ph->dev, db);
2028 
2029 err_db:
2030 	*p_addr = NULL;
2031 
2032 err_xfer:
2033 	ph->xops->xfer_put(ph, t);
2034 
2035 err_out:
2036 	dev_warn(ph->dev,
2037 		 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
2038 		 pi->proto->id, message_id, domain, ret);
2039 }
2040 
2041 #define SCMI_PROTO_FC_RING_DB(w)			\
2042 do {							\
2043 	u##w val = 0;					\
2044 							\
2045 	if (db->mask)					\
2046 		val = ioread##w(db->addr) & db->mask;	\
2047 	iowrite##w((u##w)db->set | val, db->addr);	\
2048 } while (0)
2049 
scmi_common_fastchannel_db_ring(struct scmi_fc_db_info * db)2050 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
2051 {
2052 	if (!db || !db->addr)
2053 		return;
2054 
2055 	if (db->width == 1)
2056 		SCMI_PROTO_FC_RING_DB(8);
2057 	else if (db->width == 2)
2058 		SCMI_PROTO_FC_RING_DB(16);
2059 	else if (db->width == 4)
2060 		SCMI_PROTO_FC_RING_DB(32);
2061 	else /* db->width == 8 */
2062 		SCMI_PROTO_FC_RING_DB(64);
2063 }
2064 
2065 static const struct scmi_proto_helpers_ops helpers_ops = {
2066 	.extended_name_get = scmi_common_extended_name_get,
2067 	.get_max_msg_size = scmi_common_get_max_msg_size,
2068 	.iter_response_init = scmi_iterator_init,
2069 	.iter_response_run = scmi_iterator_run,
2070 	.protocol_msg_check = scmi_protocol_msg_check,
2071 	.fastchannel_init = scmi_common_fastchannel_init,
2072 	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
2073 };
2074 
2075 /**
2076  * scmi_revision_area_get  - Retrieve version memory area.
2077  *
2078  * @ph: A reference to the protocol handle.
2079  *
2080  * A helper to grab the version memory area reference during SCMI Base protocol
2081  * initialization.
2082  *
2083  * Return: A reference to the version memory area associated to the SCMI
2084  *	   instance underlying this protocol handle.
2085  */
2086 struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle * ph)2087 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
2088 {
2089 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2090 
2091 	return pi->handle->version;
2092 }
2093 
2094 /**
2095  * scmi_protocol_version_negotiate  - Negotiate protocol version
2096  *
2097  * @ph: A reference to the protocol handle.
2098  *
2099  * An helper to negotiate a protocol version different from the latest
2100  * advertised as supported from the platform: on Success backward
2101  * compatibility is assured by the platform.
2102  *
2103  * Return: 0 on Success
2104  */
scmi_protocol_version_negotiate(struct scmi_protocol_handle * ph)2105 static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
2106 {
2107 	int ret;
2108 	struct scmi_xfer *t;
2109 	struct scmi_protocol_instance *pi = ph_to_pi(ph);
2110 
2111 	/* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
2112 	ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
2113 	if (ret)
2114 		return ret;
2115 
2116 	/* ... then attempt protocol version negotiation */
2117 	ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
2118 			    sizeof(__le32), 0, &t);
2119 	if (ret)
2120 		return ret;
2121 
2122 	put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2123 	ret = do_xfer(ph, t);
2124 	if (!ret)
2125 		pi->negotiated_version = pi->proto->supported_version;
2126 
2127 	xfer_put(ph, t);
2128 
2129 	return ret;
2130 }
2131 
2132 /**
2133  * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
2134  * instance descriptor.
2135  * @info: The reference to the related SCMI instance.
2136  * @proto: The protocol descriptor.
2137  *
2138  * Allocate a new protocol instance descriptor, using the provided @proto
2139  * description, against the specified SCMI instance @info, and initialize it;
2140  * all resources management is handled via a dedicated per-protocol devres
2141  * group.
2142  *
2143  * Context: Assumes to be called with @protocols_mtx already acquired.
2144  * Return: A reference to a freshly allocated and initialized protocol instance
2145  *	   or ERR_PTR on failure. On failure the @proto reference is at first
2146  *	   put using @scmi_protocol_put() before releasing all the devres group.
2147  */
2148 static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info * info,const struct scmi_protocol * proto)2149 scmi_alloc_init_protocol_instance(struct scmi_info *info,
2150 				  const struct scmi_protocol *proto)
2151 {
2152 	int ret = -ENOMEM;
2153 	void *gid;
2154 	struct scmi_protocol_instance *pi;
2155 	const struct scmi_handle *handle = &info->handle;
2156 
2157 	/* Protocol specific devres group */
2158 	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2159 	if (!gid) {
2160 		scmi_protocol_put(proto);
2161 		goto out;
2162 	}
2163 
2164 	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2165 	if (!pi)
2166 		goto clean;
2167 
2168 	pi->gid = gid;
2169 	pi->proto = proto;
2170 	pi->handle = handle;
2171 	pi->ph.dev = handle->dev;
2172 	pi->ph.xops = &xfer_ops;
2173 	pi->ph.hops = &helpers_ops;
2174 	pi->ph.set_priv = scmi_set_protocol_priv;
2175 	pi->ph.get_priv = scmi_get_protocol_priv;
2176 	refcount_set(&pi->users, 1);
2177 	/* proto->init is assured NON NULL by scmi_protocol_register */
2178 	ret = pi->proto->instance_init(&pi->ph);
2179 	if (ret)
2180 		goto clean;
2181 
2182 	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2183 			GFP_KERNEL);
2184 	if (ret != proto->id)
2185 		goto clean;
2186 
2187 	/*
2188 	 * Warn but ignore events registration errors since we do not want
2189 	 * to skip whole protocols if their notifications are messed up.
2190 	 */
2191 	if (pi->proto->events) {
2192 		ret = scmi_register_protocol_events(handle, pi->proto->id,
2193 						    &pi->ph,
2194 						    pi->proto->events);
2195 		if (ret)
2196 			dev_warn(handle->dev,
2197 				 "Protocol:%X - Events Registration Failed - err:%d\n",
2198 				 pi->proto->id, ret);
2199 	}
2200 
2201 	devres_close_group(handle->dev, pi->gid);
2202 	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2203 
2204 	if (pi->version > proto->supported_version) {
2205 		ret = scmi_protocol_version_negotiate(&pi->ph);
2206 		if (!ret) {
2207 			dev_info(handle->dev,
2208 				 "Protocol 0x%X successfully negotiated version 0x%X\n",
2209 				 proto->id, pi->negotiated_version);
2210 		} else {
2211 			dev_warn(handle->dev,
2212 				 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
2213 				 pi->version, pi->proto->id);
2214 			dev_warn(handle->dev,
2215 				 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
2216 				 pi->proto->supported_version);
2217 		}
2218 	}
2219 
2220 	return pi;
2221 
2222 clean:
2223 	/* Take care to put the protocol module's owner before releasing all */
2224 	scmi_protocol_put(proto);
2225 	devres_release_group(handle->dev, gid);
2226 out:
2227 	return ERR_PTR(ret);
2228 }
2229 
2230 /**
2231  * scmi_get_protocol_instance  - Protocol initialization helper.
2232  * @handle: A reference to the SCMI platform instance.
2233  * @protocol_id: The protocol being requested.
2234  *
2235  * In case the required protocol has never been requested before for this
2236  * instance, allocate and initialize all the needed structures while handling
2237  * resource allocation with a dedicated per-protocol devres subgroup.
2238  *
2239  * Return: A reference to an initialized protocol instance or error on failure:
2240  *	   in particular returns -EPROBE_DEFER when the desired protocol could
2241  *	   NOT be found.
2242  */
2243 static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle * handle,u8 protocol_id)2244 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
2245 {
2246 	struct scmi_protocol_instance *pi;
2247 	struct scmi_info *info = handle_to_scmi_info(handle);
2248 
2249 	mutex_lock(&info->protocols_mtx);
2250 	pi = idr_find(&info->protocols, protocol_id);
2251 
2252 	if (pi) {
2253 		refcount_inc(&pi->users);
2254 	} else {
2255 		const struct scmi_protocol *proto;
2256 
2257 		/* Fails if protocol not registered on bus */
2258 		proto = scmi_protocol_get(protocol_id, &info->version);
2259 		if (proto)
2260 			pi = scmi_alloc_init_protocol_instance(info, proto);
2261 		else
2262 			pi = ERR_PTR(-EPROBE_DEFER);
2263 	}
2264 	mutex_unlock(&info->protocols_mtx);
2265 
2266 	return pi;
2267 }
2268 
2269 /**
2270  * scmi_protocol_acquire  - Protocol acquire
2271  * @handle: A reference to the SCMI platform instance.
2272  * @protocol_id: The protocol being requested.
2273  *
2274  * Register a new user for the requested protocol on the specified SCMI
2275  * platform instance, possibly triggering its initialization on first user.
2276  *
2277  * Return: 0 if protocol was acquired successfully.
2278  */
scmi_protocol_acquire(const struct scmi_handle * handle,u8 protocol_id)2279 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2280 {
2281 	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2282 }
2283 
2284 /**
2285  * scmi_protocol_release  - Protocol de-initialization helper.
2286  * @handle: A reference to the SCMI platform instance.
2287  * @protocol_id: The protocol being requested.
2288  *
2289  * Remove one user for the specified protocol and triggers de-initialization
2290  * and resources de-allocation once the last user has gone.
2291  */
scmi_protocol_release(const struct scmi_handle * handle,u8 protocol_id)2292 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2293 {
2294 	struct scmi_info *info = handle_to_scmi_info(handle);
2295 	struct scmi_protocol_instance *pi;
2296 
2297 	mutex_lock(&info->protocols_mtx);
2298 	pi = idr_find(&info->protocols, protocol_id);
2299 	if (WARN_ON(!pi))
2300 		goto out;
2301 
2302 	if (refcount_dec_and_test(&pi->users)) {
2303 		void *gid = pi->gid;
2304 
2305 		if (pi->proto->events)
2306 			scmi_deregister_protocol_events(handle, protocol_id);
2307 
2308 		if (pi->proto->instance_deinit)
2309 			pi->proto->instance_deinit(&pi->ph);
2310 
2311 		idr_remove(&info->protocols, protocol_id);
2312 
2313 		scmi_protocol_put(pi->proto);
2314 
2315 		devres_release_group(handle->dev, gid);
2316 		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2317 			protocol_id);
2318 	}
2319 
2320 out:
2321 	mutex_unlock(&info->protocols_mtx);
2322 }
2323 
scmi_setup_protocol_implemented(const struct scmi_protocol_handle * ph,u8 * prot_imp)2324 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2325 				     u8 *prot_imp)
2326 {
2327 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2328 	struct scmi_info *info = handle_to_scmi_info(pi->handle);
2329 
2330 	info->protocols_imp = prot_imp;
2331 }
2332 
2333 static bool
scmi_is_protocol_implemented(const struct scmi_handle * handle,u8 prot_id)2334 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2335 {
2336 	int i;
2337 	struct scmi_info *info = handle_to_scmi_info(handle);
2338 	struct scmi_revision_info *rev = handle->version;
2339 
2340 	if (!info->protocols_imp)
2341 		return false;
2342 
2343 	for (i = 0; i < rev->num_protocols; i++)
2344 		if (info->protocols_imp[i] == prot_id)
2345 			return true;
2346 	return false;
2347 }
2348 
2349 struct scmi_protocol_devres {
2350 	const struct scmi_handle *handle;
2351 	u8 protocol_id;
2352 };
2353 
scmi_devm_release_protocol(struct device * dev,void * res)2354 static void scmi_devm_release_protocol(struct device *dev, void *res)
2355 {
2356 	struct scmi_protocol_devres *dres = res;
2357 
2358 	scmi_protocol_release(dres->handle, dres->protocol_id);
2359 }
2360 
2361 static struct scmi_protocol_instance __must_check *
scmi_devres_protocol_instance_get(struct scmi_device * sdev,u8 protocol_id)2362 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2363 {
2364 	struct scmi_protocol_instance *pi;
2365 	struct scmi_protocol_devres *dres;
2366 
2367 	dres = devres_alloc(scmi_devm_release_protocol,
2368 			    sizeof(*dres), GFP_KERNEL);
2369 	if (!dres)
2370 		return ERR_PTR(-ENOMEM);
2371 
2372 	pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2373 	if (IS_ERR(pi)) {
2374 		devres_free(dres);
2375 		return pi;
2376 	}
2377 
2378 	dres->handle = sdev->handle;
2379 	dres->protocol_id = protocol_id;
2380 	devres_add(&sdev->dev, dres);
2381 
2382 	return pi;
2383 }
2384 
2385 /**
2386  * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
2387  * @sdev: A reference to an scmi_device whose embedded struct device is to
2388  *	  be used for devres accounting.
2389  * @protocol_id: The protocol being requested.
2390  * @ph: A pointer reference used to pass back the associated protocol handle.
2391  *
2392  * Get hold of a protocol accounting for its usage, eventually triggering its
2393  * initialization, and returning the protocol specific operations and related
2394  * protocol handle which will be used as first argument in most of the
2395  * protocols operations methods.
2396  * Being a devres based managed method, protocol hold will be automatically
2397  * released, and possibly de-initialized on last user, once the SCMI driver
2398  * owning the scmi_device is unbound from it.
2399  *
2400  * Return: A reference to the requested protocol operations or error.
2401  *	   Must be checked for errors by caller.
2402  */
2403 static const void __must_check *
scmi_devm_protocol_get(struct scmi_device * sdev,u8 protocol_id,struct scmi_protocol_handle ** ph)2404 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2405 		       struct scmi_protocol_handle **ph)
2406 {
2407 	struct scmi_protocol_instance *pi;
2408 
2409 	if (!ph)
2410 		return ERR_PTR(-EINVAL);
2411 
2412 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2413 	if (IS_ERR(pi))
2414 		return pi;
2415 
2416 	*ph = &pi->ph;
2417 
2418 	return pi->proto->ops;
2419 }
2420 
2421 /**
2422  * scmi_devm_protocol_acquire  - Devres managed helper to get hold of a protocol
2423  * @sdev: A reference to an scmi_device whose embedded struct device is to
2424  *	  be used for devres accounting.
2425  * @protocol_id: The protocol being requested.
2426  *
2427  * Get hold of a protocol accounting for its usage, possibly triggering its
2428  * initialization but without getting access to its protocol specific operations
2429  * and handle.
2430  *
2431  * Being a devres based managed method, protocol hold will be automatically
2432  * released, and possibly de-initialized on last user, once the SCMI driver
2433  * owning the scmi_device is unbound from it.
2434  *
2435  * Return: 0 on SUCCESS
2436  */
scmi_devm_protocol_acquire(struct scmi_device * sdev,u8 protocol_id)2437 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2438 						   u8 protocol_id)
2439 {
2440 	struct scmi_protocol_instance *pi;
2441 
2442 	pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2443 	if (IS_ERR(pi))
2444 		return PTR_ERR(pi);
2445 
2446 	return 0;
2447 }
2448 
scmi_devm_protocol_match(struct device * dev,void * res,void * data)2449 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2450 {
2451 	struct scmi_protocol_devres *dres = res;
2452 
2453 	if (WARN_ON(!dres || !data))
2454 		return 0;
2455 
2456 	return dres->protocol_id == *((u8 *)data);
2457 }
2458 
2459 /**
2460  * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
2461  * @sdev: A reference to an scmi_device whose embedded struct device is to
2462  *	  be used for devres accounting.
2463  * @protocol_id: The protocol being requested.
2464  *
2465  * Explicitly release a protocol hold previously obtained calling the above
2466  * @scmi_devm_protocol_get.
2467  */
scmi_devm_protocol_put(struct scmi_device * sdev,u8 protocol_id)2468 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2469 {
2470 	int ret;
2471 
2472 	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2473 			     scmi_devm_protocol_match, &protocol_id);
2474 	WARN_ON(ret);
2475 }
2476 
2477 /**
2478  * scmi_is_transport_atomic  - Method to check if underlying transport for an
2479  * SCMI instance is configured as atomic.
2480  *
2481  * @handle: A reference to the SCMI platform instance.
2482  * @atomic_threshold: An optional return value for the system wide currently
2483  *		      configured threshold for atomic operations.
2484  *
2485  * Return: True if transport is configured as atomic
2486  */
scmi_is_transport_atomic(const struct scmi_handle * handle,unsigned int * atomic_threshold)2487 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2488 				     unsigned int *atomic_threshold)
2489 {
2490 	bool ret;
2491 	struct scmi_info *info = handle_to_scmi_info(handle);
2492 
2493 	ret = info->desc->atomic_enabled &&
2494 		is_transport_polling_capable(info->desc);
2495 	if (ret && atomic_threshold)
2496 		*atomic_threshold = info->desc->atomic_threshold;
2497 
2498 	return ret;
2499 }
2500 
2501 /**
2502  * scmi_handle_get() - Get the SCMI handle for a device
2503  *
2504  * @dev: pointer to device for which we want SCMI handle
2505  *
2506  * NOTE: The function does not track individual clients of the framework
2507  * and is expected to be maintained by caller of SCMI protocol library.
2508  * scmi_handle_put must be balanced with successful scmi_handle_get
2509  *
2510  * Return: pointer to handle if successful, NULL on error
2511  */
scmi_handle_get(struct device * dev)2512 static struct scmi_handle *scmi_handle_get(struct device *dev)
2513 {
2514 	struct list_head *p;
2515 	struct scmi_info *info;
2516 	struct scmi_handle *handle = NULL;
2517 
2518 	mutex_lock(&scmi_list_mutex);
2519 	list_for_each(p, &scmi_list) {
2520 		info = list_entry(p, struct scmi_info, node);
2521 		if (dev->parent == info->dev) {
2522 			info->users++;
2523 			handle = &info->handle;
2524 			break;
2525 		}
2526 	}
2527 	mutex_unlock(&scmi_list_mutex);
2528 
2529 	return handle;
2530 }
2531 
2532 /**
2533  * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2534  *
2535  * @handle: handle acquired by scmi_handle_get
2536  *
2537  * NOTE: The function does not track individual clients of the framework
2538  * and is expected to be maintained by caller of SCMI protocol library.
2539  * scmi_handle_put must be balanced with successful scmi_handle_get
2540  *
2541  * Return: 0 is successfully released
2542  *	if null was passed, it returns -EINVAL;
2543  */
scmi_handle_put(const struct scmi_handle * handle)2544 static int scmi_handle_put(const struct scmi_handle *handle)
2545 {
2546 	struct scmi_info *info;
2547 
2548 	if (!handle)
2549 		return -EINVAL;
2550 
2551 	info = handle_to_scmi_info(handle);
2552 	mutex_lock(&scmi_list_mutex);
2553 	if (!WARN_ON(!info->users))
2554 		info->users--;
2555 	mutex_unlock(&scmi_list_mutex);
2556 
2557 	return 0;
2558 }
2559 
scmi_device_link_add(struct device * consumer,struct device * supplier)2560 static void scmi_device_link_add(struct device *consumer,
2561 				 struct device *supplier)
2562 {
2563 	struct device_link *link;
2564 
2565 	link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2566 
2567 	WARN_ON(!link);
2568 }
2569 
scmi_set_handle(struct scmi_device * scmi_dev)2570 static void scmi_set_handle(struct scmi_device *scmi_dev)
2571 {
2572 	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2573 	if (scmi_dev->handle)
2574 		scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2575 }
2576 
__scmi_xfer_info_init(struct scmi_info * sinfo,struct scmi_xfers_info * info)2577 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2578 				 struct scmi_xfers_info *info)
2579 {
2580 	int i;
2581 	struct scmi_xfer *xfer;
2582 	struct device *dev = sinfo->dev;
2583 	const struct scmi_desc *desc = sinfo->desc;
2584 
2585 	/* Pre-allocated messages, no more than what hdr.seq can support */
2586 	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2587 		dev_err(dev,
2588 			"Invalid maximum messages %d, not in range [1 - %lu]\n",
2589 			info->max_msg, MSG_TOKEN_MAX);
2590 		return -EINVAL;
2591 	}
2592 
2593 	hash_init(info->pending_xfers);
2594 
2595 	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2596 	info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2597 						    GFP_KERNEL);
2598 	if (!info->xfer_alloc_table)
2599 		return -ENOMEM;
2600 
2601 	/*
2602 	 * Preallocate a number of xfers equal to max inflight messages,
2603 	 * pre-initialize the buffer pointer to pre-allocated buffers and
2604 	 * attach all of them to the free list
2605 	 */
2606 	INIT_HLIST_HEAD(&info->free_xfers);
2607 	for (i = 0; i < info->max_msg; i++) {
2608 		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2609 		if (!xfer)
2610 			return -ENOMEM;
2611 
2612 		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2613 					    GFP_KERNEL);
2614 		if (!xfer->rx.buf)
2615 			return -ENOMEM;
2616 
2617 		xfer->tx.buf = xfer->rx.buf;
2618 		init_completion(&xfer->done);
2619 		spin_lock_init(&xfer->lock);
2620 
2621 		/* Add initialized xfer to the free list */
2622 		hlist_add_head(&xfer->node, &info->free_xfers);
2623 	}
2624 
2625 	spin_lock_init(&info->xfer_lock);
2626 
2627 	return 0;
2628 }
2629 
scmi_channels_max_msg_configure(struct scmi_info * sinfo)2630 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2631 {
2632 	const struct scmi_desc *desc = sinfo->desc;
2633 
2634 	if (!desc->ops->get_max_msg) {
2635 		sinfo->tx_minfo.max_msg = desc->max_msg;
2636 		sinfo->rx_minfo.max_msg = desc->max_msg;
2637 	} else {
2638 		struct scmi_chan_info *base_cinfo;
2639 
2640 		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2641 		if (!base_cinfo)
2642 			return -EINVAL;
2643 		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2644 
2645 		/* RX channel is optional so can be skipped */
2646 		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2647 		if (base_cinfo)
2648 			sinfo->rx_minfo.max_msg =
2649 				desc->ops->get_max_msg(base_cinfo);
2650 	}
2651 
2652 	return 0;
2653 }
2654 
scmi_xfer_info_init(struct scmi_info * sinfo)2655 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2656 {
2657 	int ret;
2658 
2659 	ret = scmi_channels_max_msg_configure(sinfo);
2660 	if (ret)
2661 		return ret;
2662 
2663 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2664 	if (!ret && !idr_is_empty(&sinfo->rx_idr))
2665 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2666 
2667 	return ret;
2668 }
2669 
scmi_chan_setup(struct scmi_info * info,struct device_node * of_node,int prot_id,bool tx)2670 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2671 			   int prot_id, bool tx)
2672 {
2673 	int ret, idx;
2674 	char name[32];
2675 	struct scmi_chan_info *cinfo;
2676 	struct idr *idr;
2677 	struct scmi_device *tdev = NULL;
2678 
2679 	/* Transmit channel is first entry i.e. index 0 */
2680 	idx = tx ? 0 : 1;
2681 	idr = tx ? &info->tx_idr : &info->rx_idr;
2682 
2683 	if (!info->desc->ops->chan_available(of_node, idx)) {
2684 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2685 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2686 			return -EINVAL;
2687 		goto idr_alloc;
2688 	}
2689 
2690 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2691 	if (!cinfo)
2692 		return -ENOMEM;
2693 
2694 	cinfo->is_p2a = !tx;
2695 	cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2696 	cinfo->max_msg_size = info->desc->max_msg_size;
2697 
2698 	/* Create a unique name for this transport device */
2699 	snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2700 		 idx ? "rx" : "tx", prot_id);
2701 	/* Create a uniquely named, dedicated transport device for this chan */
2702 	tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2703 	if (!tdev) {
2704 		dev_err(info->dev,
2705 			"failed to create transport device (%s)\n", name);
2706 		devm_kfree(info->dev, cinfo);
2707 		return -EINVAL;
2708 	}
2709 	of_node_get(of_node);
2710 
2711 	cinfo->id = prot_id;
2712 	cinfo->dev = &tdev->dev;
2713 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2714 	if (ret) {
2715 		of_node_put(of_node);
2716 		scmi_device_destroy(info->dev, prot_id, name);
2717 		devm_kfree(info->dev, cinfo);
2718 		return ret;
2719 	}
2720 
2721 	if (tx && is_polling_required(cinfo, info->desc)) {
2722 		if (is_transport_polling_capable(info->desc))
2723 			dev_info(&tdev->dev,
2724 				 "Enabled polling mode TX channel - prot_id:%d\n",
2725 				 prot_id);
2726 		else
2727 			dev_warn(&tdev->dev,
2728 				 "Polling mode NOT supported by transport.\n");
2729 	}
2730 
2731 idr_alloc:
2732 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2733 	if (ret != prot_id) {
2734 		dev_err(info->dev,
2735 			"unable to allocate SCMI idr slot err %d\n", ret);
2736 		/* Destroy channel and device only if created by this call. */
2737 		if (tdev) {
2738 			of_node_put(of_node);
2739 			scmi_device_destroy(info->dev, prot_id, name);
2740 			devm_kfree(info->dev, cinfo);
2741 		}
2742 		return ret;
2743 	}
2744 
2745 	cinfo->handle = &info->handle;
2746 	return 0;
2747 }
2748 
2749 static inline int
scmi_txrx_setup(struct scmi_info * info,struct device_node * of_node,int prot_id)2750 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2751 		int prot_id)
2752 {
2753 	int ret = scmi_chan_setup(info, of_node, prot_id, true);
2754 
2755 	if (!ret) {
2756 		/* Rx is optional, report only memory errors */
2757 		ret = scmi_chan_setup(info, of_node, prot_id, false);
2758 		if (ret && ret != -ENOMEM)
2759 			ret = 0;
2760 	}
2761 
2762 	if (ret)
2763 		dev_err(info->dev,
2764 			"failed to setup channel for protocol:0x%X\n", prot_id);
2765 
2766 	return ret;
2767 }
2768 
2769 /**
2770  * scmi_channels_setup  - Helper to initialize all required channels
2771  *
2772  * @info: The SCMI instance descriptor.
2773  *
2774  * Initialize all the channels found described in the DT against the underlying
2775  * configured transport using custom defined dedicated devices instead of
2776  * borrowing devices from the SCMI drivers; this way channels are initialized
2777  * upfront during core SCMI stack probing and are no more coupled with SCMI
2778  * devices used by SCMI drivers.
2779  *
2780  * Note that, even though a pair of TX/RX channels is associated to each
2781  * protocol defined in the DT, a distinct freshly initialized channel is
2782  * created only if the DT node for the protocol at hand describes a dedicated
2783  * channel: in all the other cases the common BASE protocol channel is reused.
2784  *
2785  * Return: 0 on Success
2786  */
scmi_channels_setup(struct scmi_info * info)2787 static int scmi_channels_setup(struct scmi_info *info)
2788 {
2789 	int ret;
2790 	struct device_node *top_np = info->dev->of_node;
2791 
2792 	/* Initialize a common generic channel at first */
2793 	ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2794 	if (ret)
2795 		return ret;
2796 
2797 	for_each_available_child_of_node_scoped(top_np, child) {
2798 		u32 prot_id;
2799 
2800 		if (of_property_read_u32(child, "reg", &prot_id))
2801 			continue;
2802 
2803 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2804 			dev_err(info->dev,
2805 				"Out of range protocol %d\n", prot_id);
2806 
2807 		ret = scmi_txrx_setup(info, child, prot_id);
2808 		if (ret)
2809 			return ret;
2810 	}
2811 
2812 	return 0;
2813 }
2814 
scmi_chan_destroy(int id,void * p,void * idr)2815 static int scmi_chan_destroy(int id, void *p, void *idr)
2816 {
2817 	struct scmi_chan_info *cinfo = p;
2818 
2819 	if (cinfo->dev) {
2820 		struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2821 		struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2822 
2823 		of_node_put(cinfo->dev->of_node);
2824 		scmi_device_destroy(info->dev, id, sdev->name);
2825 		cinfo->dev = NULL;
2826 	}
2827 
2828 	idr_remove(idr, id);
2829 
2830 	return 0;
2831 }
2832 
scmi_cleanup_channels(struct scmi_info * info,struct idr * idr)2833 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2834 {
2835 	/* At first free all channels at the transport layer ... */
2836 	idr_for_each(idr, info->desc->ops->chan_free, idr);
2837 
2838 	/* ...then destroy all underlying devices */
2839 	idr_for_each(idr, scmi_chan_destroy, idr);
2840 
2841 	idr_destroy(idr);
2842 }
2843 
scmi_cleanup_txrx_channels(struct scmi_info * info)2844 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2845 {
2846 	scmi_cleanup_channels(info, &info->tx_idr);
2847 
2848 	scmi_cleanup_channels(info, &info->rx_idr);
2849 }
2850 
scmi_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)2851 static int scmi_bus_notifier(struct notifier_block *nb,
2852 			     unsigned long action, void *data)
2853 {
2854 	struct scmi_info *info = bus_nb_to_scmi_info(nb);
2855 	struct scmi_device *sdev = to_scmi_dev(data);
2856 
2857 	/* Skip devices of different SCMI instances */
2858 	if (sdev->dev.parent != info->dev)
2859 		return NOTIFY_DONE;
2860 
2861 	switch (action) {
2862 	case BUS_NOTIFY_BIND_DRIVER:
2863 		/* setup handle now as the transport is ready */
2864 		scmi_set_handle(sdev);
2865 		break;
2866 	case BUS_NOTIFY_UNBOUND_DRIVER:
2867 		scmi_handle_put(sdev->handle);
2868 		sdev->handle = NULL;
2869 		break;
2870 	default:
2871 		return NOTIFY_DONE;
2872 	}
2873 
2874 	dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2875 		sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2876 		"about to be BOUND." : "UNBOUND.");
2877 
2878 	return NOTIFY_OK;
2879 }
2880 
scmi_device_request_notifier(struct notifier_block * nb,unsigned long action,void * data)2881 static int scmi_device_request_notifier(struct notifier_block *nb,
2882 					unsigned long action, void *data)
2883 {
2884 	struct device_node *np;
2885 	struct scmi_device_id *id_table = data;
2886 	struct scmi_info *info = req_nb_to_scmi_info(nb);
2887 
2888 	np = idr_find(&info->active_protocols, id_table->protocol_id);
2889 	if (!np)
2890 		return NOTIFY_DONE;
2891 
2892 	dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2893 		action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2894 		id_table->name, id_table->protocol_id);
2895 
2896 	switch (action) {
2897 	case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2898 		scmi_create_protocol_devices(np, info, id_table->protocol_id,
2899 					     id_table->name);
2900 		break;
2901 	case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2902 		scmi_destroy_protocol_devices(info, id_table->protocol_id,
2903 					      id_table->name);
2904 		break;
2905 	default:
2906 		return NOTIFY_DONE;
2907 	}
2908 
2909 	return NOTIFY_OK;
2910 }
2911 
2912 static const char * const dbg_counter_strs[] = {
2913 	"sent_ok",
2914 	"sent_fail",
2915 	"sent_fail_polling_unsupported",
2916 	"sent_fail_channel_not_found",
2917 	"response_ok",
2918 	"notification_ok",
2919 	"delayed_response_ok",
2920 	"xfers_response_timeout",
2921 	"xfers_response_polled_timeout",
2922 	"response_polled_ok",
2923 	"err_msg_unexpected",
2924 	"err_msg_invalid",
2925 	"err_msg_nomem",
2926 	"err_protocol",
2927 	"xfers_inflight",
2928 };
2929 
reset_all_on_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)2930 static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
2931 				  size_t count, loff_t *ppos)
2932 {
2933 	struct scmi_debug_info *dbg = filp->private_data;
2934 
2935 	for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
2936 		atomic_set(&dbg->counters[i], 0);
2937 
2938 	return count;
2939 }
2940 
2941 static const struct file_operations fops_reset_counts = {
2942 	.owner = THIS_MODULE,
2943 	.open = simple_open,
2944 	.write = reset_all_on_write,
2945 };
2946 
scmi_debugfs_counters_setup(struct scmi_debug_info * dbg,struct dentry * trans)2947 static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
2948 					struct dentry *trans)
2949 {
2950 	struct dentry *counters;
2951 	int idx;
2952 
2953 	counters = debugfs_create_dir("counters", trans);
2954 
2955 	for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
2956 		debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
2957 					&dbg->counters[idx]);
2958 
2959 	debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
2960 }
2961 
scmi_debugfs_common_cleanup(void * d)2962 static void scmi_debugfs_common_cleanup(void *d)
2963 {
2964 	struct scmi_debug_info *dbg = d;
2965 
2966 	if (!dbg)
2967 		return;
2968 
2969 	debugfs_remove_recursive(dbg->top_dentry);
2970 	kfree(dbg->name);
2971 	kfree(dbg->type);
2972 }
2973 
scmi_debugfs_common_setup(struct scmi_info * info)2974 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2975 {
2976 	char top_dir[16];
2977 	struct dentry *trans, *top_dentry;
2978 	struct scmi_debug_info *dbg;
2979 	const char *c_ptr = NULL;
2980 
2981 	dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2982 	if (!dbg)
2983 		return NULL;
2984 
2985 	dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2986 	if (!dbg->name) {
2987 		devm_kfree(info->dev, dbg);
2988 		return NULL;
2989 	}
2990 
2991 	of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2992 	dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2993 	if (!dbg->type) {
2994 		kfree(dbg->name);
2995 		devm_kfree(info->dev, dbg);
2996 		return NULL;
2997 	}
2998 
2999 	snprintf(top_dir, 16, "%d", info->id);
3000 	top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
3001 	trans = debugfs_create_dir("transport", top_dentry);
3002 
3003 	dbg->is_atomic = info->desc->atomic_enabled &&
3004 				is_transport_polling_capable(info->desc);
3005 
3006 	debugfs_create_str("instance_name", 0400, top_dentry,
3007 			   (char **)&dbg->name);
3008 
3009 	debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
3010 			   (u32 *)&info->desc->atomic_threshold);
3011 
3012 	debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
3013 
3014 	debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
3015 
3016 	debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
3017 			   (u32 *)&info->desc->max_rx_timeout_ms);
3018 
3019 	debugfs_create_u32("max_msg_size", 0400, trans,
3020 			   (u32 *)&info->desc->max_msg_size);
3021 
3022 	debugfs_create_u32("tx_max_msg", 0400, trans,
3023 			   (u32 *)&info->tx_minfo.max_msg);
3024 
3025 	debugfs_create_u32("rx_max_msg", 0400, trans,
3026 			   (u32 *)&info->rx_minfo.max_msg);
3027 
3028 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
3029 		scmi_debugfs_counters_setup(dbg, trans);
3030 
3031 	dbg->top_dentry = top_dentry;
3032 
3033 	if (devm_add_action_or_reset(info->dev,
3034 				     scmi_debugfs_common_cleanup, dbg))
3035 		return NULL;
3036 
3037 	return dbg;
3038 }
3039 
scmi_debugfs_raw_mode_setup(struct scmi_info * info)3040 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
3041 {
3042 	int id, num_chans = 0, ret = 0;
3043 	struct scmi_chan_info *cinfo;
3044 	u8 channels[SCMI_MAX_CHANNELS] = {};
3045 	DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
3046 
3047 	if (!info->dbg)
3048 		return -EINVAL;
3049 
3050 	/* Enumerate all channels to collect their ids */
3051 	idr_for_each_entry(&info->tx_idr, cinfo, id) {
3052 		/*
3053 		 * Cannot happen, but be defensive.
3054 		 * Zero as num_chans is ok, warn and carry on.
3055 		 */
3056 		if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
3057 			dev_warn(info->dev,
3058 				 "SCMI RAW - Error enumerating channels\n");
3059 			break;
3060 		}
3061 
3062 		if (!test_bit(cinfo->id, protos)) {
3063 			channels[num_chans++] = cinfo->id;
3064 			set_bit(cinfo->id, protos);
3065 		}
3066 	}
3067 
3068 	info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3069 				       info->id, channels, num_chans,
3070 				       info->desc, info->tx_minfo.max_msg);
3071 	if (IS_ERR(info->raw)) {
3072 		dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
3073 		ret = PTR_ERR(info->raw);
3074 		info->raw = NULL;
3075 	}
3076 
3077 	return ret;
3078 }
3079 
scmi_transport_setup(struct device * dev)3080 static const struct scmi_desc *scmi_transport_setup(struct device *dev)
3081 {
3082 	struct scmi_transport *trans;
3083 	int ret;
3084 
3085 	trans = dev_get_platdata(dev);
3086 	if (!trans || !trans->supplier || !trans->core_ops)
3087 		return NULL;
3088 
3089 	if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3090 		dev_err(dev,
3091 			"Adding link to supplier transport device failed\n");
3092 		return NULL;
3093 	}
3094 
3095 	/* Provide core transport ops */
3096 	*trans->core_ops = &scmi_trans_core_ops;
3097 
3098 	dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3099 
3100 	ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3101 				   &trans->desc.max_rx_timeout_ms);
3102 	if (ret && ret != -EINVAL)
3103 		dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3104 
3105 	ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3106 				   &trans->desc.max_msg_size);
3107 	if (ret && ret != -EINVAL)
3108 		dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3109 
3110 	ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3111 				   &trans->desc.max_msg);
3112 	if (ret && ret != -EINVAL)
3113 		dev_err(dev, "Malformed arm,max-msg DT property.\n");
3114 
3115 	dev_info(dev,
3116 		 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3117 		 trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
3118 		 trans->desc.max_msg);
3119 
3120 	/* System wide atomic threshold for atomic ops .. if any */
3121 	if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3122 				  &trans->desc.atomic_threshold))
3123 		dev_info(dev,
3124 			 "SCMI System wide atomic threshold set to %u us\n",
3125 			 trans->desc.atomic_threshold);
3126 
3127 	return &trans->desc;
3128 }
3129 
scmi_enable_matching_quirks(struct scmi_info * info)3130 static void scmi_enable_matching_quirks(struct scmi_info *info)
3131 {
3132 	struct scmi_revision_info *rev = &info->version;
3133 
3134 	dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
3135 		rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
3136 
3137 	/* Enable applicable quirks */
3138 	scmi_quirks_enable(info->dev, rev->vendor_id,
3139 			   rev->sub_vendor_id, rev->impl_ver);
3140 }
3141 
scmi_probe(struct platform_device * pdev)3142 static int scmi_probe(struct platform_device *pdev)
3143 {
3144 	int ret;
3145 	char *err_str = "probe failure\n";
3146 	struct scmi_handle *handle;
3147 	const struct scmi_desc *desc;
3148 	struct scmi_info *info;
3149 	bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
3150 	struct device *dev = &pdev->dev;
3151 	struct device_node *child, *np = dev->of_node;
3152 
3153 	desc = scmi_transport_setup(dev);
3154 	if (!desc) {
3155 		err_str = "transport invalid\n";
3156 		ret = -EINVAL;
3157 		goto out_err;
3158 	}
3159 
3160 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3161 	if (!info)
3162 		return -ENOMEM;
3163 
3164 	info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3165 	if (info->id < 0)
3166 		return info->id;
3167 
3168 	info->dev = dev;
3169 	info->desc = desc;
3170 	info->bus_nb.notifier_call = scmi_bus_notifier;
3171 	info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3172 	INIT_LIST_HEAD(&info->node);
3173 	idr_init(&info->protocols);
3174 	mutex_init(&info->protocols_mtx);
3175 	idr_init(&info->active_protocols);
3176 	mutex_init(&info->devreq_mtx);
3177 
3178 	platform_set_drvdata(pdev, info);
3179 	idr_init(&info->tx_idr);
3180 	idr_init(&info->rx_idr);
3181 
3182 	handle = &info->handle;
3183 	handle->dev = info->dev;
3184 	handle->version = &info->version;
3185 	handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3186 	handle->devm_protocol_get = scmi_devm_protocol_get;
3187 	handle->devm_protocol_put = scmi_devm_protocol_put;
3188 	handle->is_transport_atomic = scmi_is_transport_atomic;
3189 
3190 	/* Setup all channels described in the DT at first */
3191 	ret = scmi_channels_setup(info);
3192 	if (ret) {
3193 		err_str = "failed to setup channels\n";
3194 		goto clear_ida;
3195 	}
3196 
3197 	ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3198 	if (ret) {
3199 		err_str = "failed to register bus notifier\n";
3200 		goto clear_txrx_setup;
3201 	}
3202 
3203 	ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
3204 					       &info->dev_req_nb);
3205 	if (ret) {
3206 		err_str = "failed to register device notifier\n";
3207 		goto clear_bus_notifier;
3208 	}
3209 
3210 	ret = scmi_xfer_info_init(info);
3211 	if (ret) {
3212 		err_str = "failed to init xfers pool\n";
3213 		goto clear_dev_req_notifier;
3214 	}
3215 
3216 	if (scmi_top_dentry) {
3217 		info->dbg = scmi_debugfs_common_setup(info);
3218 		if (!info->dbg)
3219 			dev_warn(dev, "Failed to setup SCMI debugfs.\n");
3220 
3221 		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
3222 			ret = scmi_debugfs_raw_mode_setup(info);
3223 			if (!coex) {
3224 				if (ret)
3225 					goto clear_dev_req_notifier;
3226 
3227 				/* Bail out anyway when coex disabled. */
3228 				return 0;
3229 			}
3230 
3231 			/* Coex enabled, carry on in any case. */
3232 			dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
3233 		}
3234 	}
3235 
3236 	if (scmi_notification_init(handle))
3237 		dev_err(dev, "SCMI Notifications NOT available.\n");
3238 
3239 	if (info->desc->atomic_enabled &&
3240 	    !is_transport_polling_capable(info->desc))
3241 		dev_err(dev,
3242 			"Transport is not polling capable. Atomic mode not supported.\n");
3243 
3244 	/*
3245 	 * Trigger SCMI Base protocol initialization.
3246 	 * It's mandatory and won't be ever released/deinit until the
3247 	 * SCMI stack is shutdown/unloaded as a whole.
3248 	 */
3249 	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
3250 	if (ret) {
3251 		err_str = "unable to communicate with SCMI\n";
3252 		if (coex) {
3253 			dev_err(dev, "%s", err_str);
3254 			return 0;
3255 		}
3256 		goto notification_exit;
3257 	}
3258 
3259 	mutex_lock(&scmi_list_mutex);
3260 	list_add_tail(&info->node, &scmi_list);
3261 	mutex_unlock(&scmi_list_mutex);
3262 
3263 	scmi_enable_matching_quirks(info);
3264 
3265 	for_each_available_child_of_node(np, child) {
3266 		u32 prot_id;
3267 
3268 		if (of_property_read_u32(child, "reg", &prot_id))
3269 			continue;
3270 
3271 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
3272 			dev_err(dev, "Out of range protocol %d\n", prot_id);
3273 
3274 		if (!scmi_is_protocol_implemented(handle, prot_id)) {
3275 			dev_err(dev, "SCMI protocol %d not implemented\n",
3276 				prot_id);
3277 			continue;
3278 		}
3279 
3280 		/*
3281 		 * Save this valid DT protocol descriptor amongst
3282 		 * @active_protocols for this SCMI instance/
3283 		 */
3284 		ret = idr_alloc(&info->active_protocols, child,
3285 				prot_id, prot_id + 1, GFP_KERNEL);
3286 		if (ret != prot_id) {
3287 			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
3288 				prot_id);
3289 			continue;
3290 		}
3291 
3292 		of_node_get(child);
3293 		scmi_create_protocol_devices(child, info, prot_id, NULL);
3294 	}
3295 
3296 	return 0;
3297 
3298 notification_exit:
3299 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3300 		scmi_raw_mode_cleanup(info->raw);
3301 	scmi_notification_exit(&info->handle);
3302 clear_dev_req_notifier:
3303 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3304 					   &info->dev_req_nb);
3305 clear_bus_notifier:
3306 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3307 clear_txrx_setup:
3308 	scmi_cleanup_txrx_channels(info);
3309 clear_ida:
3310 	ida_free(&scmi_id, info->id);
3311 
3312 out_err:
3313 	return dev_err_probe(dev, ret, "%s", err_str);
3314 }
3315 
scmi_remove(struct platform_device * pdev)3316 static void scmi_remove(struct platform_device *pdev)
3317 {
3318 	int id;
3319 	struct scmi_info *info = platform_get_drvdata(pdev);
3320 	struct device_node *child;
3321 
3322 	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3323 		scmi_raw_mode_cleanup(info->raw);
3324 
3325 	mutex_lock(&scmi_list_mutex);
3326 	if (info->users)
3327 		dev_warn(&pdev->dev,
3328 			 "Still active SCMI users will be forcibly unbound.\n");
3329 	list_del(&info->node);
3330 	mutex_unlock(&scmi_list_mutex);
3331 
3332 	scmi_notification_exit(&info->handle);
3333 
3334 	mutex_lock(&info->protocols_mtx);
3335 	idr_destroy(&info->protocols);
3336 	mutex_unlock(&info->protocols_mtx);
3337 
3338 	idr_for_each_entry(&info->active_protocols, child, id)
3339 		of_node_put(child);
3340 	idr_destroy(&info->active_protocols);
3341 
3342 	blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3343 					   &info->dev_req_nb);
3344 	bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3345 
3346 	/* Safe to free channels since no more users */
3347 	scmi_cleanup_txrx_channels(info);
3348 
3349 	ida_free(&scmi_id, info->id);
3350 }
3351 
protocol_version_show(struct device * dev,struct device_attribute * attr,char * buf)3352 static ssize_t protocol_version_show(struct device *dev,
3353 				     struct device_attribute *attr, char *buf)
3354 {
3355 	struct scmi_info *info = dev_get_drvdata(dev);
3356 
3357 	return sprintf(buf, "%u.%u\n", info->version.major_ver,
3358 		       info->version.minor_ver);
3359 }
3360 static DEVICE_ATTR_RO(protocol_version);
3361 
firmware_version_show(struct device * dev,struct device_attribute * attr,char * buf)3362 static ssize_t firmware_version_show(struct device *dev,
3363 				     struct device_attribute *attr, char *buf)
3364 {
3365 	struct scmi_info *info = dev_get_drvdata(dev);
3366 
3367 	return sprintf(buf, "0x%x\n", info->version.impl_ver);
3368 }
3369 static DEVICE_ATTR_RO(firmware_version);
3370 
vendor_id_show(struct device * dev,struct device_attribute * attr,char * buf)3371 static ssize_t vendor_id_show(struct device *dev,
3372 			      struct device_attribute *attr, char *buf)
3373 {
3374 	struct scmi_info *info = dev_get_drvdata(dev);
3375 
3376 	return sprintf(buf, "%s\n", info->version.vendor_id);
3377 }
3378 static DEVICE_ATTR_RO(vendor_id);
3379 
sub_vendor_id_show(struct device * dev,struct device_attribute * attr,char * buf)3380 static ssize_t sub_vendor_id_show(struct device *dev,
3381 				  struct device_attribute *attr, char *buf)
3382 {
3383 	struct scmi_info *info = dev_get_drvdata(dev);
3384 
3385 	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3386 }
3387 static DEVICE_ATTR_RO(sub_vendor_id);
3388 
3389 static struct attribute *versions_attrs[] = {
3390 	&dev_attr_firmware_version.attr,
3391 	&dev_attr_protocol_version.attr,
3392 	&dev_attr_vendor_id.attr,
3393 	&dev_attr_sub_vendor_id.attr,
3394 	NULL,
3395 };
3396 ATTRIBUTE_GROUPS(versions);
3397 
3398 static struct platform_driver scmi_driver = {
3399 	.driver = {
3400 		   .name = "arm-scmi",
3401 		   .suppress_bind_attrs = true,
3402 		   .dev_groups = versions_groups,
3403 		   },
3404 	.probe = scmi_probe,
3405 	.remove = scmi_remove,
3406 };
3407 
scmi_debugfs_init(void)3408 static struct dentry *scmi_debugfs_init(void)
3409 {
3410 	struct dentry *d;
3411 
3412 	d = debugfs_create_dir("scmi", NULL);
3413 	if (IS_ERR(d)) {
3414 		pr_err("Could NOT create SCMI top dentry.\n");
3415 		return NULL;
3416 	}
3417 
3418 	return d;
3419 }
3420 
scmi_inflight_count(const struct scmi_handle * handle)3421 int scmi_inflight_count(const struct scmi_handle *handle)
3422 {
3423 	if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
3424 		struct scmi_info *info = handle_to_scmi_info(handle);
3425 
3426 		return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
3427 	} else {
3428 		return 0;
3429 	}
3430 }
3431 
scmi_driver_init(void)3432 static int __init scmi_driver_init(void)
3433 {
3434 	scmi_quirks_initialize();
3435 
3436 	/* Bail out if no SCMI transport was configured */
3437 	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3438 		return -EINVAL;
3439 
3440 	if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
3441 		scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
3442 
3443 	if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
3444 		scmi_trans_core_ops.msg = scmi_message_operations_get();
3445 
3446 	if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3447 		scmi_top_dentry = scmi_debugfs_init();
3448 
3449 	scmi_base_register();
3450 
3451 	scmi_clock_register();
3452 	scmi_perf_register();
3453 	scmi_power_register();
3454 	scmi_reset_register();
3455 	scmi_sensors_register();
3456 	scmi_voltage_register();
3457 	scmi_system_register();
3458 	scmi_powercap_register();
3459 	scmi_pinctrl_register();
3460 
3461 	return platform_driver_register(&scmi_driver);
3462 }
3463 module_init(scmi_driver_init);
3464 
scmi_driver_exit(void)3465 static void __exit scmi_driver_exit(void)
3466 {
3467 	scmi_base_unregister();
3468 
3469 	scmi_clock_unregister();
3470 	scmi_perf_unregister();
3471 	scmi_power_unregister();
3472 	scmi_reset_unregister();
3473 	scmi_sensors_unregister();
3474 	scmi_voltage_unregister();
3475 	scmi_system_unregister();
3476 	scmi_powercap_unregister();
3477 	scmi_pinctrl_unregister();
3478 
3479 	platform_driver_unregister(&scmi_driver);
3480 
3481 	debugfs_remove_recursive(scmi_top_dentry);
3482 }
3483 module_exit(scmi_driver_exit);
3484 
3485 MODULE_ALIAS("platform:arm-scmi");
3486 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3487 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3488 MODULE_LICENSE("GPL v2");
3489