// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * The SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2025 ARM Ltd.
 */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/bitmap.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/idr.h>
24 #include <linux/io.h>
25 #include <linux/io-64-nonatomic-hi-lo.h>
26 #include <linux/kernel.h>
27 #include <linux/kmod.h>
28 #include <linux/ktime.h>
29 #include <linux/hashtable.h>
30 #include <linux/list.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/platform_device.h>
34 #include <linux/processor.h>
35 #include <linux/refcount.h>
36 #include <linux/slab.h>
37 #include <linux/xarray.h>
38
39 #include "common.h"
40 #include "notify.h"
41 #include "quirks.h"
42
43 #include "raw_mode.h"
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/scmi.h>
47
48 #define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
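/*
 * For example (hypothetical vendor string): a vendor protocol 0x81 exposed by
 * a platform whose Base protocol reports vendor_id "acme" would be requested
 * via request_module() using the alias "scmi-protocol-0x81-acme", as done in
 * scmi_vendor_protocol_get() below.
 */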
49
50 static DEFINE_IDA(scmi_id);
51
52 static DEFINE_XARRAY(scmi_protocols);
53
54 /* List of all SCMI devices active in system */
55 static LIST_HEAD(scmi_list);
56 /* Protection for the entire list */
57 static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
59 static atomic_t transfer_last_id;
60
61 static struct dentry *scmi_top_dentry;
62
63 /**
64 * struct scmi_xfers_info - Structure to manage transfer information
65 *
66 * @xfer_alloc_table: Bitmap table for allocated messages.
67 * Index of this bitmap table is also used for message
68 * sequence identifier.
69 * @xfer_lock: Protection for message allocation
70 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
76 */
77 struct scmi_xfers_info {
78 unsigned long *xfer_alloc_table;
79 spinlock_t xfer_lock;
80 int max_msg;
81 struct hlist_head free_xfers;
82 DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
83 };
84
85 /**
86 * struct scmi_protocol_instance - Describe an initialized protocol instance.
87 * @handle: Reference to the SCMI handle associated to this protocol instance.
88 * @proto: A reference to the protocol descriptor.
89 * @gid: A reference for per-protocol devres management.
90 * @users: A refcount to track effective users of this protocol.
91 * @priv: Reference for optional protocol private data.
92 * @version: Protocol version supported by the platform as detected at runtime.
93 * @negotiated_version: When the platform supports a newer protocol version,
94 * the agent will try to negotiate with the platform the
95 * usage of the newest version known to it, since
96 * backward compatibility is NOT automatically assured.
97 * This field is NON-zero when a successful negotiation
98 * has completed.
99 * @ph: An embedded protocol handle that will be passed down to protocol
100 * initialization code to identify this instance.
101 *
 * Each protocol is initialized independently, once for each SCMI platform
 * instance in which it is defined by the DT and implemented by the SCMI
 * server firmware.
104 */
105 struct scmi_protocol_instance {
106 const struct scmi_handle *handle;
107 const struct scmi_protocol *proto;
108 void *gid;
109 refcount_t users;
110 void *priv;
111 unsigned int version;
112 unsigned int negotiated_version;
113 struct scmi_protocol_handle ph;
114 };
115
116 #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
117
118 /**
119 * struct scmi_debug_info - Debug common info
120 * @top_dentry: A reference to the top debugfs dentry
121 * @name: Name of this SCMI instance
122 * @type: Type of this SCMI instance
123 * @is_atomic: Flag to state if the transport of this instance is atomic
 * @counters: An array of atomic_t's used for tracking statistics (if enabled)
125 */
126 struct scmi_debug_info {
127 struct dentry *top_dentry;
128 const char *name;
129 const char *type;
130 bool is_atomic;
131 atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
132 };
133
134 /**
135 * struct scmi_info - Structure representing a SCMI instance
136 *
137 * @id: A sequence number starting from zero identifying this instance
138 * @dev: Device pointer
139 * @desc: SoC description for this instance
140 * @version: SCMI revision information containing protocol version,
141 * implementation version and (sub-)vendor identification.
142 * @handle: Instance of SCMI handle to send to clients
143 * @tx_minfo: Universal Transmit Message management info
144 * @rx_minfo: Universal Receive Message management info
145 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
146 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
147 * @protocols: IDR for protocols' instance descriptors initialized for
148 * this SCMI instance: populated on protocol's first attempted
149 * usage.
150 * @protocols_mtx: A mutex to protect protocols instances initialization.
151 * @protocols_imp: List of protocols implemented, currently maximum of
152 * scmi_revision_info.num_protocols elements allocated by the
153 * base protocol
154 * @active_protocols: IDR storing device_nodes for protocols actually defined
155 * in the DT and confirmed as implemented by fw.
156 * @notify_priv: Pointer to private data structure specific to notifications.
157 * @node: List head
158 * @users: Number of users of this instance
159 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
160 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
161 * bus
162 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
163 * @dbg: A pointer to debugfs related data (if any)
164 * @raw: An opaque reference handle used by SCMI Raw mode.
165 */
166 struct scmi_info {
167 int id;
168 struct device *dev;
169 const struct scmi_desc *desc;
170 struct scmi_revision_info version;
171 struct scmi_handle handle;
172 struct scmi_xfers_info tx_minfo;
173 struct scmi_xfers_info rx_minfo;
174 struct idr tx_idr;
175 struct idr rx_idr;
176 struct idr protocols;
177 /* Ensure mutual exclusive access to protocols instance array */
178 struct mutex protocols_mtx;
179 u8 *protocols_imp;
180 struct idr active_protocols;
181 void *notify_priv;
182 struct list_head node;
183 int users;
184 struct notifier_block bus_nb;
185 struct notifier_block dev_req_nb;
186 /* Serialize device creation process for this instance */
187 struct mutex devreq_mtx;
188 struct scmi_debug_info *dbg;
189 void *raw;
190 };
191
192 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
193 #define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
194 #define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
195
196 static void scmi_rx_callback(struct scmi_chan_info *cinfo,
197 u32 msg_hdr, void *priv);
198 static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
199 u32 msg_hdr, enum scmi_bad_msg err);
200
201 static struct scmi_transport_core_operations scmi_trans_core_ops = {
202 .bad_message_trace = scmi_bad_message_trace,
203 .rx_callback = scmi_rx_callback,
204 };
205
206 static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
208 char *sub_vendor_id, u32 impl_ver)
209 {
210 char *signature, *p;
211 unsigned long hash = 0;
212
213 /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
214 signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
215 vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
216 if (!signature)
217 return 0;
218
219 p = signature;
220 while (*p)
221 hash = partial_name_hash(tolower(*p++), hash);
222 hash = end_name_hash(hash);
223
224 kfree(signature);
225
226 return hash;
227 }
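/*
 * As a sketch of the signature above (hypothetical values): protocol_id 0x81
 * from vendor "acme", with no sub_vendor_id and impl_ver 0x00010000, hashes
 * the string "81|acme||0x00010000" (case-insensitively) into the lookup key.
 */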
228
229 static unsigned long
scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
231 char *sub_vendor_id, u32 impl_ver)
232 {
233 if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
234 return protocol_id;
235 else
236 return scmi_vendor_protocol_signature(protocol_id, vendor_id,
237 sub_vendor_id, impl_ver);
238 }
239
240 static const struct scmi_protocol *
__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
242 char *sub_vendor_id, u32 impl_ver)
243 {
244 unsigned long key;
245 struct scmi_protocol *proto = NULL;
246
247 key = scmi_protocol_key_calculate(protocol_id, vendor_id,
248 sub_vendor_id, impl_ver);
249 if (key)
250 proto = xa_load(&scmi_protocols, key);
251
252 return proto;
253 }
254
255 static const struct scmi_protocol *
scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
257 char *sub_vendor_id, u32 impl_ver)
258 {
259 const struct scmi_protocol *proto = NULL;
260
261 /* Searching for closest match ...*/
262 proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
263 sub_vendor_id, impl_ver);
264 if (proto)
265 return proto;
266
267 /* Any match just on vendor/sub_vendor ? */
268 if (impl_ver) {
269 proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
270 sub_vendor_id, 0);
271 if (proto)
272 return proto;
273 }
274
275 /* Any match just on the vendor ? */
276 if (sub_vendor_id)
277 proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
278 NULL, 0);
279 return proto;
280 }
281
282 static const struct scmi_protocol *
scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
284 {
285 const struct scmi_protocol *proto;
286
287 proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
288 version->sub_vendor_id,
289 version->impl_ver);
290 if (!proto) {
291 int ret;
292
293 pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
294 protocol_id, version->vendor_id);
295
296 /* Note that vendor_id is mandatory for vendor protocols */
297 ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
298 protocol_id, version->vendor_id);
299 if (ret) {
300 pr_warn("Problem loading module for protocol 0x%x\n",
301 protocol_id);
302 return NULL;
303 }
304
305 /* Lookup again, once modules loaded */
306 proto = scmi_vendor_protocol_lookup(protocol_id,
307 version->vendor_id,
308 version->sub_vendor_id,
309 version->impl_ver);
310 }
311
312 if (proto)
313 pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
314 protocol_id, proto->vendor_id ?: "",
315 proto->sub_vendor_id ?: "", proto->impl_ver);
316
317 return proto;
318 }
319
320 static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
322 {
323 const struct scmi_protocol *proto = NULL;
324
325 if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
326 proto = xa_load(&scmi_protocols, protocol_id);
327 else
328 proto = scmi_vendor_protocol_get(protocol_id, version);
329
330 if (!proto || !try_module_get(proto->owner)) {
331 pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
332 return NULL;
333 }
334
335 pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
336
337 return proto;
338 }
339
static void scmi_protocol_put(const struct scmi_protocol *proto)
341 {
342 if (proto)
343 module_put(proto->owner);
344 }
345
static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
347 {
348 if (!proto->vendor_id) {
349 pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
350 return -EINVAL;
351 }
352
353 if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
354 pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
355 return -EINVAL;
356 }
357
358 if (proto->sub_vendor_id &&
359 strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
360 pr_err("malformed sub_vendor_id for protocol 0x%x\n",
361 proto->id);
362 return -EINVAL;
363 }
364
365 return 0;
366 }
367
int scmi_protocol_register(const struct scmi_protocol *proto)
369 {
370 int ret;
371 unsigned long key;
372
373 if (!proto) {
374 pr_err("invalid protocol\n");
375 return -EINVAL;
376 }
377
378 if (!proto->instance_init) {
379 pr_err("missing init for protocol 0x%x\n", proto->id);
380 return -EINVAL;
381 }
382
383 if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
384 scmi_vendor_protocol_check(proto))
385 return -EINVAL;
386
387 /*
388 * Calculate a protocol key to register this protocol with the core;
389 * key value 0 is considered invalid.
390 */
391 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
392 proto->sub_vendor_id,
393 proto->impl_ver);
394 if (!key)
395 return -EINVAL;
396
397 ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
398 if (ret) {
399 pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
400 proto->id, ret);
401 return ret;
402 }
403
404 pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
405 proto->id, proto->vendor_id, proto->sub_vendor_id,
406 proto->impl_ver);
407
408 return 0;
409 }
410 EXPORT_SYMBOL_GPL(scmi_protocol_register);
411
void scmi_protocol_unregister(const struct scmi_protocol *proto)
413 {
414 unsigned long key;
415
416 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
417 proto->sub_vendor_id,
418 proto->impl_ver);
419 if (!key)
420 return;
421
422 xa_erase(&scmi_protocols, key);
423
424 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
425 }
426 EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
427
428 /**
429 * scmi_create_protocol_devices - Create devices for all pending requests for
430 * this SCMI instance.
431 *
432 * @np: The device node describing the protocol
433 * @info: The SCMI instance descriptor
434 * @prot_id: The protocol ID
435 * @name: The optional name of the device to be created: if not provided this
436 * call will lead to the creation of all the devices currently requested
437 * for the specified protocol.
438 */
static void scmi_create_protocol_devices(struct device_node *np,
440 struct scmi_info *info,
441 int prot_id, const char *name)
442 {
443 mutex_lock(&info->devreq_mtx);
444 scmi_device_create(np, info->dev, prot_id, name);
445 mutex_unlock(&info->devreq_mtx);
446 }
447
static void scmi_destroy_protocol_devices(struct scmi_info *info,
449 int prot_id, const char *name)
450 {
451 mutex_lock(&info->devreq_mtx);
452 scmi_device_destroy(info->dev, prot_id, name);
453 mutex_unlock(&info->devreq_mtx);
454 }
455
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
457 void *priv)
458 {
459 struct scmi_info *info = handle_to_scmi_info(handle);
460
461 info->notify_priv = priv;
	/* Ensure the updated protocol private data is visible */
463 smp_wmb();
464 }
465
void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
467 {
468 struct scmi_info *info = handle_to_scmi_info(handle);
469
470 /* Ensure protocols_private_data has been updated */
471 smp_rmb();
472 return info->notify_priv;
473 }
474
475 /**
476 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
477 *
478 * @minfo: Pointer to Tx/Rx Message management info based on channel type
479 * @xfer: The xfer to act upon
480 *
481 * Pick the next unused monotonically increasing token and set it into
482 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
483 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
484 * of incorrect association of a late and expired xfer with a live in-flight
485 * transaction, both happening to re-use the same token identifier.
486 *
 * Since the platform is NOT required to answer our requests in order, we
 * should account for a few rare but possible scenarios:
 *
 * - the exact 'next_token' may NOT be available, so pick xfer_id >= next_token
 *   using find_next_zero_bit() starting from the candidate next_token bit
 *
 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used by in-flight
 *   messages, but there are plenty of free tokens at the start, so try a
 *   second pass using find_next_zero_bit() starting from 0.
496 *
497 * X = used in-flight
498 *
499 * Normal
500 * ------
501 *
502 * |- xfer_id picked
503 * -----------+----------------------------------------------------------
504 * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
505 * ----------------------------------------------------------------------
506 * ^
507 * |- next_token
508 *
509 * Out-of-order pending at start
510 * -----------------------------
511 *
512 * |- xfer_id picked, last_token fixed
513 * -----+----------------------------------------------------------------
514 * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
515 * ----------------------------------------------------------------------
516 * ^
517 * |- next_token
518 *
519 *
520 * Out-of-order pending at end
521 * ---------------------------
522 *
523 * |- xfer_id picked, last_token fixed
524 * -----+----------------------------------------------------------------
525 * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
526 * ----------------------------------------------------------------------
527 * ^
528 * |- next_token
529 *
530 * Context: Assumes to be called with @xfer_lock already acquired.
531 *
532 * Return: 0 on Success or error
533 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
535 struct scmi_xfer *xfer)
536 {
537 unsigned long xfer_id, next_token;
538
539 /*
540 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
541 * using the pre-allocated transfer_id as a base.
542 * Note that the global transfer_id is shared across all message types
543 * so there could be holes in the allocated set of monotonic sequence
544 * numbers, but that is going to limit the effectiveness of the
545 * mitigation only in very rare limit conditions.
546 */
547 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
548
549 /* Pick the next available xfer_id >= next_token */
550 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
551 MSG_TOKEN_MAX, next_token);
552 if (xfer_id == MSG_TOKEN_MAX) {
553 /*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at the start of xfer_alloc_table,
		 * so try again from the beginning.
557 */
558 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
559 MSG_TOKEN_MAX, 0);
560 /*
561 * Something is wrong if we got here since there can be a
562 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
563 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
564 */
565 if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
566 return -ENOMEM;
567 }
568
569 /* Update +/- last_token accordingly if we skipped some hole */
570 if (xfer_id != next_token)
571 atomic_add((int)(xfer_id - next_token), &transfer_last_id);
572
573 xfer->hdr.seq = (u16)xfer_id;
574
575 return 0;
576 }
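/*
 * A worked example of the above (hypothetical numbers): with next_token == 10
 * and tokens 10..12 still marked in-flight, find_next_zero_bit() returns 13,
 * so transfer_last_id is advanced by the 3 skipped slots and xfer->hdr.seq is
 * set to 13.
 */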
577
578 /**
579 * scmi_xfer_token_clear - Release the token
580 *
581 * @minfo: Pointer to Tx/Rx Message management info based on channel type
582 * @xfer: The xfer to act upon
583 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
585 struct scmi_xfer *xfer)
586 {
587 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
588 }
589
590 /**
591 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
592 *
593 * @xfer: The xfer to register
594 * @minfo: Pointer to Tx/Rx Message management info based on channel type
595 *
596 * Note that this helper assumes that the xfer to be registered as in-flight
597 * had been built using an xfer sequence number which still corresponds to a
598 * free slot in the xfer_alloc_table.
599 *
600 * Context: Assumes to be called with @xfer_lock already acquired.
601 */
602 static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
604 struct scmi_xfers_info *minfo)
605 {
606 /* Set in-flight */
607 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
608 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
609 xfer->pending = true;
610 }
611
612 /**
613 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
614 *
615 * @xfer: The xfer to register
616 * @minfo: Pointer to Tx/Rx Message management info based on channel type
617 *
618 * Note that this helper does NOT assume anything about the sequence number
619 * that was baked into the provided xfer, so it checks at first if it can
620 * be mapped to a free slot and fails with an error if another xfer with the
621 * same sequence number is currently still registered as in-flight.
622 *
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 * could not be mapped to a free slot in the xfer_alloc_table.
625 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
627 struct scmi_xfers_info *minfo)
628 {
629 int ret = 0;
630 unsigned long flags;
631
632 spin_lock_irqsave(&minfo->xfer_lock, flags);
633 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
634 scmi_xfer_inflight_register_unlocked(xfer, minfo);
635 else
636 ret = -EBUSY;
637 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
638
639 return ret;
640 }
641
642 /**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
 * in-flight on the TX channel, if possible.
645 *
646 * @handle: Pointer to SCMI entity handle
647 * @xfer: The xfer to register
648 *
649 * Return: 0 on Success, error otherwise
650 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
652 struct scmi_xfer *xfer)
653 {
654 struct scmi_info *info = handle_to_scmi_info(handle);
655
656 return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
657 }
658
659 /**
660 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
661 * as pending in-flight
662 *
663 * @xfer: The xfer to act upon
664 * @minfo: Pointer to Tx/Rx Message management info based on channel type
665 *
666 * Return: 0 on Success or error otherwise
667 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
669 struct scmi_xfers_info *minfo)
670 {
671 int ret;
672 unsigned long flags;
673
674 spin_lock_irqsave(&minfo->xfer_lock, flags);
675 /* Set a new monotonic token as the xfer sequence number */
676 ret = scmi_xfer_token_set(minfo, xfer);
677 if (!ret)
678 scmi_xfer_inflight_register_unlocked(xfer, minfo);
679 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
680
681 return ret;
682 }
683
684 /**
685 * scmi_xfer_get() - Allocate one message
686 *
687 * @handle: Pointer to SCMI entity handle
688 * @minfo: Pointer to Tx/Rx Message management info based on channel type
689 *
690 * Helper function which is used by various message functions that are
691 * exposed to clients of this driver for allocating a message traffic event.
692 *
 * Picks an xfer from the free list @free_xfers (if any is available) and
 * performs a basic initialization.
 *
 * Note that, at this point, no sequence number has been assigned to the
 * allocated xfer yet, nor is it registered as a pending transaction.
698 *
699 * The successfully initialized xfer is refcounted.
700 *
701 * Context: Holds @xfer_lock while manipulating @free_xfers.
702 *
703 * Return: An initialized xfer if all went fine, else pointer error.
704 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
706 struct scmi_xfers_info *minfo)
707 {
708 unsigned long flags;
709 struct scmi_xfer *xfer;
710
711 spin_lock_irqsave(&minfo->xfer_lock, flags);
712 if (hlist_empty(&minfo->free_xfers)) {
713 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
714 return ERR_PTR(-ENOMEM);
715 }
716
717 /* grab an xfer from the free_list */
718 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
719 hlist_del_init(&xfer->node);
720
721 /*
	 * Allocate transfer_id early so that it can also be used as the base
	 * for monotonic sequence number generation if needed.
724 */
725 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
726
727 refcount_set(&xfer->users, 1);
728 atomic_set(&xfer->busy, SCMI_XFER_FREE);
729 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
730
731 return xfer;
732 }
733
734 /**
735 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
736 *
737 * @handle: Pointer to SCMI entity handle
738 *
739 * Note that xfer is taken from the TX channel structures.
740 *
741 * Return: A valid xfer on Success, or an error-pointer otherwise
742 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
744 {
745 struct scmi_xfer *xfer;
746 struct scmi_info *info = handle_to_scmi_info(handle);
747
748 xfer = scmi_xfer_get(handle, &info->tx_minfo);
749 if (!IS_ERR(xfer))
750 xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
751
752 return xfer;
753 }
754
755 /**
756 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
757 * to use for a specific protocol_id Raw transaction.
758 *
759 * @handle: Pointer to SCMI entity handle
760 * @protocol_id: Identifier of the protocol
761 *
762 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
763 * the DT to have an associated channel and be usable; but in Raw mode any
764 * protocol in range is allowed, re-using the Base channel, so as to enable
765 * fuzzing on any protocol without the need of a fully compiled DT.
766 *
767 * Return: A reference to the channel to use, or an ERR_PTR
768 */
769 struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
771 {
772 struct scmi_chan_info *cinfo;
773 struct scmi_info *info = handle_to_scmi_info(handle);
774
775 cinfo = idr_find(&info->tx_idr, protocol_id);
776 if (!cinfo) {
777 if (protocol_id == SCMI_PROTOCOL_BASE)
778 return ERR_PTR(-EINVAL);
779 /* Use Base channel for protocols not defined for DT */
780 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
781 if (!cinfo)
782 return ERR_PTR(-EINVAL);
783 dev_warn_once(handle->dev,
784 "Using Base channel for protocol 0x%X\n",
785 protocol_id);
786 }
787
788 return cinfo;
789 }
790
791 /**
792 * __scmi_xfer_put() - Release a message
793 *
794 * @minfo: Pointer to Tx/Rx Message management info based on channel type
795 * @xfer: message that was reserved by scmi_xfer_get
796 *
797 * After refcount check, possibly release an xfer, clearing the token slot,
798 * removing xfer from @pending_xfers and putting it back into free_xfers.
799 *
800 * This holds a spinlock to maintain integrity of internal data structures.
801 */
802 static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
804 {
805 unsigned long flags;
806
807 spin_lock_irqsave(&minfo->xfer_lock, flags);
808 if (refcount_dec_and_test(&xfer->users)) {
809 if (xfer->pending) {
810 scmi_xfer_token_clear(minfo, xfer);
811 hash_del(&xfer->node);
812 xfer->pending = false;
813 }
814 hlist_add_head(&xfer->node, &minfo->free_xfers);
815 }
816 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
817 }
818
819 /**
820 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
821 *
822 * @handle: Pointer to SCMI entity handle
823 * @xfer: A reference to the xfer to put
824 *
 * Note that, as with other xfer_put() handlers, the xfer is effectively
 * released only when there are no more users on the system.
827 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
829 {
830 struct scmi_info *info = handle_to_scmi_info(handle);
831
832 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
833 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
834 return __scmi_xfer_put(&info->tx_minfo, xfer);
835 }
836
837 /**
838 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
839 *
840 * @minfo: Pointer to Tx/Rx Message management info based on channel type
841 * @xfer_id: Token ID to lookup in @pending_xfers
842 *
843 * Refcounting is untouched.
844 *
845 * Context: Assumes to be called with @xfer_lock already acquired.
846 *
847 * Return: A valid xfer on Success or error otherwise
848 */
849 static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
851 {
852 struct scmi_xfer *xfer = NULL;
853
854 if (test_bit(xfer_id, minfo->xfer_alloc_table))
855 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
856
857 return xfer ?: ERR_PTR(-EINVAL);
858 }
859
860 /**
861 * scmi_bad_message_trace - A helper to trace weird messages
862 *
863 * @cinfo: A reference to the channel descriptor on which the message was
864 * received
865 * @msg_hdr: Message header to track
866 * @err: A specific error code used as a status value in traces.
867 *
 * This helper can be used to trace any kind of weird, incomplete, unexpected
 * or timed-out message that arrives; such messages can be traced only by
 * referring to the header content, since the payload is missing/unreliable.
871 */
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
873 enum scmi_bad_msg err)
874 {
875 char *tag;
876 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
877
878 switch (MSG_XTRACT_TYPE(msg_hdr)) {
879 case MSG_TYPE_COMMAND:
880 tag = "!RESP";
881 break;
882 case MSG_TYPE_DELAYED_RESP:
883 tag = "!DLYD";
884 break;
885 case MSG_TYPE_NOTIFICATION:
886 tag = "!NOTI";
887 break;
888 default:
889 tag = "!UNKN";
890 break;
891 }
892
893 trace_scmi_msg_dump(info->id, cinfo->id,
894 MSG_XTRACT_PROT_ID(msg_hdr),
895 MSG_XTRACT_ID(msg_hdr), tag,
896 MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
897 }
898
899 /**
900 * scmi_msg_response_validate - Validate message type against state of related
901 * xfer
902 *
903 * @cinfo: A reference to the channel descriptor.
904 * @msg_type: Message type to check
905 * @xfer: A reference to the xfer to validate against @msg_type
906 *
907 * This function checks if @msg_type is congruent with the current state of
908 * a pending @xfer; if an asynchronous delayed response is received before the
909 * related synchronous response (Out-of-Order Delayed Response) the missing
910 * synchronous response is assumed to be OK and completed, carrying on with the
911 * Delayed Response: this is done to address the case in which the underlying
912 * SCMI transport can deliver such out-of-order responses.
913 *
914 * Context: Assumes to be called with xfer->lock already acquired.
915 *
916 * Return: 0 on Success, error otherwise
917 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
919 u8 msg_type,
920 struct scmi_xfer *xfer)
921 {
922 /*
923 * Even if a response was indeed expected on this slot at this point,
924 * a buggy platform could wrongly reply feeding us an unexpected
925 * delayed response we're not prepared to handle: bail-out safely
926 * blaming firmware.
927 */
928 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
929 dev_err(cinfo->dev,
930 "Delayed Response for %d not expected! Buggy F/W ?\n",
931 xfer->hdr.seq);
932 return -EINVAL;
933 }
934
935 switch (xfer->state) {
936 case SCMI_XFER_SENT_OK:
937 if (msg_type == MSG_TYPE_DELAYED_RESP) {
938 /*
939 * Delayed Response expected but delivered earlier.
940 * Assume message RESPONSE was OK and skip state.
941 */
942 xfer->hdr.status = SCMI_SUCCESS;
943 xfer->state = SCMI_XFER_RESP_OK;
944 complete(&xfer->done);
945 dev_warn(cinfo->dev,
946 "Received valid OoO Delayed Response for %d\n",
947 xfer->hdr.seq);
948 }
949 break;
950 case SCMI_XFER_RESP_OK:
951 if (msg_type != MSG_TYPE_DELAYED_RESP)
952 return -EINVAL;
953 break;
954 case SCMI_XFER_DRESP_OK:
955 /* No further message expected once in SCMI_XFER_DRESP_OK */
956 return -EINVAL;
957 }
958
959 return 0;
960 }
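/*
 * In short, the only state sequences accepted above are
 * SENT_OK -> RESP_OK -> DRESP_OK and, for an out-of-order Delayed Response,
 * SENT_OK -> DRESP_OK with the missing synchronous response assumed OK.
 */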
961
962 /**
963 * scmi_xfer_state_update - Update xfer state
964 *
965 * @xfer: A reference to the xfer to update
966 * @msg_type: Type of message being processed.
967 *
968 * Note that this message is assumed to have been already successfully validated
969 * by @scmi_msg_response_validate(), so here we just update the state.
970 *
971 * Context: Assumes to be called on an xfer exclusively acquired using the
972 * busy flag.
973 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
975 {
976 xfer->hdr.type = msg_type;
977
978 /* Unknown command types were already discarded earlier */
979 if (xfer->hdr.type == MSG_TYPE_COMMAND)
980 xfer->state = SCMI_XFER_RESP_OK;
981 else
982 xfer->state = SCMI_XFER_DRESP_OK;
983 }
984
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
986 {
987 int ret;
988
989 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
990
991 return ret == SCMI_XFER_FREE;
992 }
993
994 /**
995 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
996 *
997 * @cinfo: A reference to the channel descriptor.
998 * @msg_hdr: A message header to use as lookup key
999 *
1000 * When a valid xfer is found for the sequence number embedded in the provided
1001 * msg_hdr, reference counting is properly updated and exclusive access to this
1002 * xfer is granted till released with @scmi_xfer_command_release.
1003 *
1004 * Return: A valid @xfer on Success or error otherwise.
1005 */
1006 static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
1008 {
1009 int ret;
1010 unsigned long flags;
1011 struct scmi_xfer *xfer;
1012 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1013 struct scmi_xfers_info *minfo = &info->tx_minfo;
1014 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1015 u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
1016
1017 /* Are we even expecting this? */
1018 spin_lock_irqsave(&minfo->xfer_lock, flags);
1019 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
1020 if (IS_ERR(xfer)) {
1021 dev_err(cinfo->dev,
1022 "Message for %d type %d is not expected!\n",
1023 xfer_id, msg_type);
1024 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1025
1026 scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
1027 scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
1028
1029 return xfer;
1030 }
1031 refcount_inc(&xfer->users);
1032 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1033
1034 spin_lock_irqsave(&xfer->lock, flags);
1035 ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
1036 /*
1037 * If a pending xfer was found which was also in a congruent state with
1038 * the received message, acquire exclusive access to it setting the busy
1039 * flag.
1040 * Spins only on the rare limit condition of concurrent reception of
1041 * RESP and DRESP for the same xfer.
1042 */
1043 if (!ret) {
1044 spin_until_cond(scmi_xfer_acquired(xfer));
1045 scmi_xfer_state_update(xfer, msg_type);
1046 }
1047 spin_unlock_irqrestore(&xfer->lock, flags);
1048
1049 if (ret) {
1050 dev_err(cinfo->dev,
1051 "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
1052 msg_type, xfer_id, msg_hdr, xfer->state);
1053
1054 scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
1055 scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
1056
1057 /* On error the refcount incremented above has to be dropped */
1058 __scmi_xfer_put(minfo, xfer);
1059 xfer = ERR_PTR(-EINVAL);
1060 }
1061
1062 return xfer;
1063 }
1064
static inline void scmi_xfer_command_release(struct scmi_info *info,
1066 struct scmi_xfer *xfer)
1067 {
1068 atomic_set(&xfer->busy, SCMI_XFER_FREE);
1069 __scmi_xfer_put(&info->tx_minfo, xfer);
1070 }
1071
static inline void scmi_clear_channel(struct scmi_info *info,
1073 struct scmi_chan_info *cinfo)
1074 {
1075 if (!cinfo->is_p2a) {
1076 dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
1077 return;
1078 }
1079
1080 if (info->desc->ops->clear_channel)
1081 info->desc->ops->clear_channel(cinfo);
1082 }
1083
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
1085 u32 msg_hdr, void *priv)
1086 {
1087 struct scmi_xfer *xfer;
1088 struct device *dev = cinfo->dev;
1089 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1090 struct scmi_xfers_info *minfo = &info->rx_minfo;
1091 ktime_t ts;
1092
1093 ts = ktime_get_boottime();
1094 xfer = scmi_xfer_get(cinfo->handle, minfo);
1095 if (IS_ERR(xfer)) {
1096 dev_err(dev, "failed to get free message slot (%ld)\n",
1097 PTR_ERR(xfer));
1098
1099 scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
1100 scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
1101
1102 scmi_clear_channel(info, cinfo);
1103 return;
1104 }
1105
1106 unpack_scmi_header(msg_hdr, &xfer->hdr);
1107 if (priv)
1108 /* Ensure order between xfer->priv store and following ops */
1109 smp_store_mb(xfer->priv, priv);
1110 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
1111 xfer);
1112
1113 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1114 xfer->hdr.id, "NOTI", xfer->hdr.seq,
1115 xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
1116 scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
1117
1118 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
1119 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
1120
1121 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1122 xfer->hdr.protocol_id, xfer->hdr.seq,
1123 MSG_TYPE_NOTIFICATION);
1124
1125 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1126 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
1127 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
1128 cinfo->id);
1129 }
1130
1131 __scmi_xfer_put(minfo, xfer);
1132
1133 scmi_clear_channel(info, cinfo);
1134 }
1135
static void scmi_handle_response(struct scmi_chan_info *cinfo,
1137 u32 msg_hdr, void *priv)
1138 {
1139 struct scmi_xfer *xfer;
1140 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1141
1142 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
1143 if (IS_ERR(xfer)) {
1144 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
1145 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
1146
1147 if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
1148 scmi_clear_channel(info, cinfo);
1149 return;
1150 }
1151
1152 /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
1153 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1154 xfer->rx.len = info->desc->max_msg_size;
1155
1156 if (priv)
1157 /* Ensure order between xfer->priv store and following ops */
1158 smp_store_mb(xfer->priv, priv);
1159 info->desc->ops->fetch_response(cinfo, xfer);
1160
1161 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1162 xfer->hdr.id,
1163 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
1164 (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
1165 (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
1166 xfer->hdr.seq, xfer->hdr.status,
1167 xfer->rx.buf, xfer->rx.len);
1168
1169 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1170 xfer->hdr.protocol_id, xfer->hdr.seq,
1171 xfer->hdr.type);
1172
1173 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
1174 scmi_clear_channel(info, cinfo);
1175 complete(xfer->async_done);
1176 scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
1177 } else {
1178 complete(&xfer->done);
1179 scmi_inc_count(info->dbg->counters, RESPONSE_OK);
1180 }
1181
1182 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1183 /*
		 * When in polling mode, avoid queueing the Raw xfer on the
		 * IRQ RX path since it will already be queued at the end of
		 * the TX poll loop.
1187 */
1188 if (!xfer->hdr.poll_completion ||
1189 xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1190 scmi_raw_message_report(info->raw, xfer,
1191 SCMI_RAW_REPLY_QUEUE,
1192 cinfo->id);
1193 }
1194
1195 scmi_xfer_command_release(info, xfer);
1196 }
1197
1198 /**
1199 * scmi_rx_callback() - callback for receiving messages
1200 *
1201 * @cinfo: SCMI channel info
1202 * @msg_hdr: Message header
1203 * @priv: Transport specific private data.
1204 *
1205 * Processes one received message to appropriate transfer information and
1206 * signals completion of the transfer.
1207 *
1208 * NOTE: This function will be invoked in IRQ context, hence should be
1209 * as optimal as possible.
1210 */
static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
1212 void *priv)
1213 {
1214 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1215
1216 switch (msg_type) {
1217 case MSG_TYPE_NOTIFICATION:
1218 scmi_handle_notification(cinfo, msg_hdr, priv);
1219 break;
1220 case MSG_TYPE_COMMAND:
1221 case MSG_TYPE_DELAYED_RESP:
1222 scmi_handle_response(cinfo, msg_hdr, priv);
1223 break;
1224 default:
1225 WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
1226 scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
1227 break;
1228 }
1229 }
1230
1231 /**
1232 * xfer_put() - Release a transmit message
1233 *
1234 * @ph: Pointer to SCMI protocol handle
1235 * @xfer: message that was reserved by xfer_get_init
1236 */
static void xfer_put(const struct scmi_protocol_handle *ph,
1238 struct scmi_xfer *xfer)
1239 {
1240 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1241 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1242
1243 __scmi_xfer_put(&info->tx_minfo, xfer);
1244 }
1245
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
1247 struct scmi_xfer *xfer, ktime_t stop,
1248 bool *ooo)
1249 {
1250 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1251
1252 /*
1253 * Poll also on xfer->done so that polling can be forcibly terminated
1254 * in case of out-of-order receptions of delayed responses
1255 */
1256 return info->desc->ops->poll_done(cinfo, xfer) ||
1257 (*ooo = try_wait_for_completion(&xfer->done)) ||
1258 ktime_after(ktime_get(), stop);
1259 }
1260
static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
1262 struct scmi_chan_info *cinfo,
1263 struct scmi_xfer *xfer, unsigned int timeout_ms)
1264 {
1265 int ret = 0;
1266 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1267
1268 if (xfer->hdr.poll_completion) {
1269 /*
1270 * Real polling is needed only if transport has NOT declared
1271 * itself to support synchronous commands replies.
1272 */
1273 if (!desc->sync_cmds_completed_on_ret) {
1274 bool ooo = false;
1275
1276 /*
1277 * Poll on xfer using transport provided .poll_done();
1278 * assumes no completion interrupt was available.
1279 */
1280 ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
1281
1282 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
1283 stop, &ooo));
1284 if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
1285 dev_err(dev,
1286 "timed out in resp(caller: %pS) - polling\n",
1287 (void *)_RET_IP_);
1288 ret = -ETIMEDOUT;
1289 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
1290 }
1291 }
1292
1293 if (!ret) {
1294 unsigned long flags;
1295
1296 /*
1297 * Do not fetch_response if an out-of-order delayed
1298 * response is being processed.
1299 */
1300 spin_lock_irqsave(&xfer->lock, flags);
1301 if (xfer->state == SCMI_XFER_SENT_OK) {
1302 desc->ops->fetch_response(cinfo, xfer);
1303 xfer->state = SCMI_XFER_RESP_OK;
1304 }
1305 spin_unlock_irqrestore(&xfer->lock, flags);
1306
1307 /* Trace polled replies. */
1308 trace_scmi_msg_dump(info->id, cinfo->id,
1309 xfer->hdr.protocol_id, xfer->hdr.id,
1310 !SCMI_XFER_IS_RAW(xfer) ?
1311 "RESP" : "resp",
1312 xfer->hdr.seq, xfer->hdr.status,
1313 xfer->rx.buf, xfer->rx.len);
1314 scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
1315
1316 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1317 scmi_raw_message_report(info->raw, xfer,
1318 SCMI_RAW_REPLY_QUEUE,
1319 cinfo->id);
1320 }
1321 }
1322 } else {
1323 /* And we wait for the response. */
1324 if (!wait_for_completion_timeout(&xfer->done,
1325 msecs_to_jiffies(timeout_ms))) {
1326 dev_err(dev, "timed out in resp(caller: %pS)\n",
1327 (void *)_RET_IP_);
1328 ret = -ETIMEDOUT;
1329 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
1330 }
1331 }
1332
1333 return ret;
1334 }
1335
1336 /**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
1338 * waiting for a synchronous message response.
1339 *
1340 * @cinfo: SCMI channel info
1341 * @xfer: Reference to the transfer being waited for.
1342 *
1343 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1344 * configuration flags like xfer->hdr.poll_completion.
1345 *
1346 * Return: 0 on Success, error otherwise.
1347 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
1349 struct scmi_xfer *xfer)
1350 {
1351 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1352 struct device *dev = info->dev;
1353
1354 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1355 xfer->hdr.protocol_id, xfer->hdr.seq,
1356 info->desc->max_rx_timeout_ms,
1357 xfer->hdr.poll_completion);
1358
1359 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1360 info->desc->max_rx_timeout_ms);
1361 }
1362
1363 /**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * reply to a raw xfer request on a specific channel for the required timeout.
1366 *
1367 * @cinfo: SCMI channel info
1368 * @xfer: Reference to the transfer being waited for.
1369 * @timeout_ms: The maximum timeout in milliseconds
1370 *
1371 * Return: 0 on Success, error otherwise.
1372 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
1374 struct scmi_xfer *xfer,
1375 unsigned int timeout_ms)
1376 {
1377 int ret;
1378 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1379 struct device *dev = info->dev;
1380
1381 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1382 if (ret)
1383 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1384 pack_scmi_header(&xfer->hdr));
1385
1386 return ret;
1387 }
1388
1389 /**
1390 * do_xfer() - Do one transfer
1391 *
1392 * @ph: Pointer to SCMI protocol handle
1393 * @xfer: Transfer to initiate and wait for response
1394 *
 * Return: -ETIMEDOUT in case of no response; the corresponding error in case
 * of a transmit error; 0 if all goes well.
1398 */
static int do_xfer(const struct scmi_protocol_handle *ph,
1400 struct scmi_xfer *xfer)
1401 {
1402 int ret;
1403 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1404 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1405 struct device *dev = info->dev;
1406 struct scmi_chan_info *cinfo;
1407
1408 /* Check for polling request on custom command xfers at first */
1409 if (xfer->hdr.poll_completion &&
1410 !is_transport_polling_capable(info->desc)) {
1411 dev_warn_once(dev,
1412 "Polling mode is not supported by transport.\n");
1413 scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
1414 return -EINVAL;
1415 }
1416
1417 cinfo = idr_find(&info->tx_idr, pi->proto->id);
1418 if (unlikely(!cinfo)) {
1419 scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
1420 return -EINVAL;
1421 }
1422 /* True ONLY if also supported by transport. */
1423 if (is_polling_enabled(cinfo, info->desc))
1424 xfer->hdr.poll_completion = true;
1425
1426 /*
1427 * Initialise protocol id now from protocol handle to avoid it being
1428 * overridden by mistake (or malice) by the protocol code mangling with
1429 * the scmi_xfer structure prior to this.
1430 */
1431 xfer->hdr.protocol_id = pi->proto->id;
1432 reinit_completion(&xfer->done);
1433
1434 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1435 xfer->hdr.protocol_id, xfer->hdr.seq,
1436 xfer->hdr.poll_completion);
1437
1438 /* Clear any stale status */
1439 xfer->hdr.status = SCMI_SUCCESS;
1440 xfer->state = SCMI_XFER_SENT_OK;
1441 /*
1442 * Even though spinlocking is not needed here since no race is possible
1443 * on xfer->state due to the monotonically increasing tokens allocation,
1444 * we must anyway ensure xfer->state initialization is not re-ordered
1445 * after the .send_message() to be sure that on the RX path an early
1446 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1447 */
1448 smp_mb();
1449
1450 ret = info->desc->ops->send_message(cinfo, xfer);
1451 if (ret < 0) {
1452 dev_dbg(dev, "Failed to send message %d\n", ret);
1453 scmi_inc_count(info->dbg->counters, SENT_FAIL);
1454 return ret;
1455 }
1456
1457 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1458 xfer->hdr.id, "CMND", xfer->hdr.seq,
1459 xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1460 scmi_inc_count(info->dbg->counters, SENT_OK);
1461
1462 ret = scmi_wait_for_message_response(cinfo, xfer);
1463 if (!ret && xfer->hdr.status) {
1464 ret = scmi_to_linux_errno(xfer->hdr.status);
1465 scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
1466 }
1467
1468 if (info->desc->ops->mark_txdone)
1469 info->desc->ops->mark_txdone(cinfo, ret, xfer);
1470
1471 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1472 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
1473
1474 return ret;
1475 }
1476
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
1478 struct scmi_xfer *xfer)
1479 {
1480 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1481 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1482
1483 xfer->rx.len = info->desc->max_msg_size;
1484 }
1485
1486 /**
1487 * do_xfer_with_response() - Do one transfer and wait until the delayed
1488 * response is received
1489 *
1490 * @ph: Pointer to SCMI protocol handle
1491 * @xfer: Transfer to initiate and wait for response
1492 *
1493 * Using asynchronous commands in atomic/polling mode should be avoided since
1494 * it could cause long busy-waiting here, so ignore polling for the delayed
1495 * response and WARN if it was requested for this command transaction since
1496 * upper layers should refrain from issuing such kind of requests.
1497 *
1498 * The only other option would have been to refrain from using any asynchronous
1499 * command even if made available, when an atomic transport is detected, and
1500 * instead forcibly use the synchronous version (thing that can be easily
1501 * attained at the protocol layer), but this would also have led to longer
1502 * stalls of the channel for synchronous commands and possibly timeouts.
1503 * (in other words there is usually a good reason if a platform provides an
1504 * asynchronous version of a command and we should prefer to use it...just not
1505 * when using atomic/polling mode)
1506 *
 * Return: -ETIMEDOUT in case of no delayed response; the corresponding error
 * in case of a transmit error; 0 if all goes well.
1509 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
1511 struct scmi_xfer *xfer)
1512 {
1513 int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
1514 DECLARE_COMPLETION_ONSTACK(async_response);
1515
1516 xfer->async_done = &async_response;
1517
1518 /*
1519 * Delayed responses should not be polled, so an async command should
1520 * not have been used when requiring an atomic/poll context; WARN and
1521 * perform instead a sleeping wait.
1522 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
1523 */
1524 WARN_ON_ONCE(xfer->hdr.poll_completion);
1525
1526 ret = do_xfer(ph, xfer);
1527 if (!ret) {
1528 if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1529 dev_err(ph->dev,
1530 "timed out in delayed resp(caller: %pS)\n",
1531 (void *)_RET_IP_);
1532 ret = -ETIMEDOUT;
1533 } else if (xfer->hdr.status) {
1534 ret = scmi_to_linux_errno(xfer->hdr.status);
1535 }
1536 }
1537
1538 xfer->async_done = NULL;
1539 return ret;
1540 }
1541
1542 /**
1543 * xfer_get_init() - Allocate and initialise one message for transmit
1544 *
1545 * @ph: Pointer to SCMI protocol handle
1546 * @msg_id: Message identifier
1547 * @tx_size: transmit message size
1548 * @rx_size: receive message size
1549 * @p: pointer to the allocated and initialised message
1550 *
 * This function allocates the message using @scmi_xfer_get and initialises
 * the header.
1553 *
1554 * Return: 0 if all went fine with @p pointing to message, else
1555 * corresponding error.
1556 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
1558 u8 msg_id, size_t tx_size, size_t rx_size,
1559 struct scmi_xfer **p)
1560 {
1561 int ret;
1562 struct scmi_xfer *xfer;
1563 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1564 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1565 struct scmi_xfers_info *minfo = &info->tx_minfo;
1566 struct device *dev = info->dev;
1567
1568 /* Ensure we have sane transfer sizes */
1569 if (rx_size > info->desc->max_msg_size ||
1570 tx_size > info->desc->max_msg_size)
1571 return -ERANGE;
1572
1573 xfer = scmi_xfer_get(pi->handle, minfo);
1574 if (IS_ERR(xfer)) {
1575 ret = PTR_ERR(xfer);
1576 dev_err(dev, "failed to get free message slot(%d)\n", ret);
1577 return ret;
1578 }
1579
1580 /* Pick a sequence number and register this xfer as in-flight */
1581 ret = scmi_xfer_pending_set(xfer, minfo);
1582 if (ret) {
1583 dev_err(pi->handle->dev,
1584 "Failed to get monotonic token %d\n", ret);
1585 __scmi_xfer_put(minfo, xfer);
1586 return ret;
1587 }
1588
1589 xfer->tx.len = tx_size;
1590 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1591 xfer->hdr.type = MSG_TYPE_COMMAND;
1592 xfer->hdr.id = msg_id;
1593 xfer->hdr.poll_completion = false;
1594
1595 *p = xfer;
1596
1597 return 0;
1598 }
1599
1600 /**
1601 * version_get() - command to get the revision of the SCMI entity
1602 *
1603 * @ph: Pointer to SCMI protocol handle
1604 * @version: Holds returned version of protocol.
1605 *
1606 * Updates the SCMI information in the internal data structure.
1607 *
1608 * Return: 0 if all went fine, else return appropriate error.
1609 */
1610 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1611 {
1612 int ret;
1613 __le32 *rev_info;
1614 struct scmi_xfer *t;
1615
1616 ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1617 if (ret)
1618 return ret;
1619
1620 ret = do_xfer(ph, t);
1621 if (!ret) {
1622 rev_info = t->rx.buf;
1623 *version = le32_to_cpu(*rev_info);
1624 }
1625
1626 xfer_put(ph, t);
1627 return ret;
1628 }
1629
1630 /**
1631 * scmi_set_protocol_priv - Set protocol specific data at init time
1632 *
1633 * @ph: A reference to the protocol handle.
1634 * @priv: The private data to set.
1635 * @version: The detected protocol version for the core to register.
1636 *
1637 * Return: 0 on Success
1638 */
1639 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1640 void *priv, u32 version)
1641 {
1642 struct scmi_protocol_instance *pi = ph_to_pi(ph);
1643
1644 pi->priv = priv;
1645 pi->version = version;
1646
1647 return 0;
1648 }
1649
1650 /**
1651 * scmi_get_protocol_priv - Get protocol specific data set at init time
1652 *
1653 * @ph: A reference to the protocol handle.
1654 *
1655 * Return: Protocol private data if any was set.
1656 */
1657 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1658 {
1659 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1660
1661 return pi->priv;
1662 }
1663
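/*
 * Illustrative sketch (editor's addition): how a protocol implementation
 * would typically combine the version and priv helpers above from its
 * instance_init callback. The example_info structure and function name are
 * hypothetical.
 *
 *	static int example_protocol_init(const struct scmi_protocol_handle *ph)
 *	{
 *		u32 version;
 *		struct example_info *info;
 *		int ret;
 *
 *		ret = ph->xops->version_get(ph, &version);
 *		if (ret)
 *			return ret;
 *
 *		info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
 *		if (!info)
 *			return -ENOMEM;
 *
 *		return ph->set_priv(ph, info, version);
 *	}
 *
 * Any later protocol operation can then retrieve the same data with
 * ph->get_priv(ph).
 */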
1664 static const struct scmi_xfer_ops xfer_ops = {
1665 .version_get = version_get,
1666 .xfer_get_init = xfer_get_init,
1667 .reset_rx_to_maxsz = reset_rx_to_maxsz,
1668 .do_xfer = do_xfer,
1669 .do_xfer_with_response = do_xfer_with_response,
1670 .xfer_put = xfer_put,
1671 };
1672
1673 struct scmi_msg_resp_domain_name_get {
1674 __le32 flags;
1675 u8 name[SCMI_MAX_STR_SIZE];
1676 };
1677
1678 /**
1679 * scmi_common_extended_name_get - Common helper to get extended resources name
1680 * @ph: A protocol handle reference.
1681 * @cmd_id: The specific command ID to use.
1682 * @res_id: The specific resource ID to use.
1683 * @flags: A pointer to specific flags to use, if any.
1684 * @name: A pointer to the preallocated area where the retrieved name will be
1685 * stored as a NULL terminated string.
1686 * @len: The len in bytes of the @name char array.
1687 *
1688 * Return: 0 on Success
1689 */
1690 static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1691 u8 cmd_id, u32 res_id, u32 *flags,
1692 char *name, size_t len)
1693 {
1694 int ret;
1695 size_t txlen;
1696 struct scmi_xfer *t;
1697 struct scmi_msg_resp_domain_name_get *resp;
1698
1699 txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
1700 ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1701 if (ret)
1702 goto out;
1703
1704 put_unaligned_le32(res_id, t->tx.buf);
1705 if (flags)
1706 put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1707 resp = t->rx.buf;
1708
1709 ret = ph->xops->do_xfer(ph, t);
1710 if (!ret)
1711 strscpy(name, resp->name, len);
1712
1713 ph->xops->xfer_put(ph, t);
1714 out:
1715 if (ret)
1716 dev_warn(ph->dev,
1717 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1718 res_id, ret, name);
1719 return ret;
1720 }
1721
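/*
 * Illustrative sketch (editor's addition): a protocol that detects the
 * extended-name attribute bit would fetch the full resource name through the
 * helper ops; the command id below is hypothetical.
 *
 *	char name[SCMI_MAX_STR_SIZE];
 *
 *	ph->hops->extended_name_get(ph, EXAMPLE_NAME_GET_CMD, domain_id,
 *				    NULL, name, SCMI_MAX_STR_SIZE);
 */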
1722 /**
1723 * scmi_common_get_max_msg_size - Get maximum message size
1724 * @ph: A protocol handle reference.
1725 *
1726 * Return: Maximum message size for the current protocol.
1727 */
1728 static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
1729 {
1730 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1731 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1732
1733 return info->desc->max_msg_size;
1734 }
1735
1736 /**
1737 * scmi_protocol_msg_check - Check protocol message attributes
1738 *
1739 * @ph: A reference to the protocol handle.
1740 * @message_id: The ID of the message to check.
1741 * @attributes: A parameter to optionally return the retrieved message
1742 * attributes, in case of Success.
1743 *
1744 * A helper to check protocol message attributes for a specific protocol
1745 * and message pair.
1746 *
1747 * Return: 0 on SUCCESS
1748 */
1749 static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1750 u32 message_id, u32 *attributes)
1751 {
1752 int ret;
1753 struct scmi_xfer *t;
1754
1755 ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1756 sizeof(__le32), 0, &t);
1757 if (ret)
1758 return ret;
1759
1760 put_unaligned_le32(message_id, t->tx.buf);
1761 ret = do_xfer(ph, t);
1762 if (!ret && attributes)
1763 *attributes = get_unaligned_le32(t->rx.buf);
1764 xfer_put(ph, t);
1765
1766 return ret;
1767 }
1768
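/*
 * Illustrative sketch (editor's addition): protocols commonly use the check
 * above to probe for optional commands before advertising a feature; the
 * command id and the priv flag shown here are hypothetical.
 *
 *	u32 attributes;
 *
 *	if (!ph->hops->protocol_msg_check(ph, EXAMPLE_OPTIONAL_CMD, &attributes))
 *		priv->has_optional_cmd = true;
 */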
1769 /**
1770 * struct scmi_iterator - Iterator descriptor
1771 * @msg: A reference to the message TX buffer; filled by @prepare_message with
1772 * a proper custom command payload for each multi-part command request.
1773 * @resp: A reference to the response RX buffer; used by @update_state and
1774 * @process_response to parse the multi-part replies.
1775 * @t: A reference to the underlying xfer initialized and used transparently by
1776 * the iterator internal routines.
1777 * @ph: A reference to the associated protocol handle to be used.
1778 * @ops: A reference to the custom provided iterator operations.
1779 * @state: The current iterator state; used and updated in turn by the iterator's
1780 * internal routines and by the caller-provided @scmi_iterator_ops.
1781 * @priv: A reference to optional private data as provided by the caller and
1782 * passed back to the @scmi_iterator_ops.
1783 */
1784 struct scmi_iterator {
1785 void *msg;
1786 void *resp;
1787 struct scmi_xfer *t;
1788 const struct scmi_protocol_handle *ph;
1789 struct scmi_iterator_ops *ops;
1790 struct scmi_iterator_state state;
1791 void *priv;
1792 };
1793
1794 static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1795 struct scmi_iterator_ops *ops,
1796 unsigned int max_resources, u8 msg_id,
1797 size_t tx_size, void *priv)
1798 {
1799 int ret;
1800 struct scmi_iterator *i;
1801
1802 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1803 if (!i)
1804 return ERR_PTR(-ENOMEM);
1805
1806 i->ph = ph;
1807 i->ops = ops;
1808 i->priv = priv;
1809
1810 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1811 if (ret) {
1812 devm_kfree(ph->dev, i);
1813 return ERR_PTR(ret);
1814 }
1815
1816 i->state.max_resources = max_resources;
1817 i->msg = i->t->tx.buf;
1818 i->resp = i->t->rx.buf;
1819
1820 return i;
1821 }
1822
1823 static int scmi_iterator_run(void *iter)
1824 {
1825 int ret = -EINVAL;
1826 struct scmi_iterator_ops *iops;
1827 const struct scmi_protocol_handle *ph;
1828 struct scmi_iterator_state *st;
1829 struct scmi_iterator *i = iter;
1830
1831 if (!i || !i->ops || !i->ph)
1832 return ret;
1833
1834 iops = i->ops;
1835 ph = i->ph;
1836 st = &i->state;
1837
1838 do {
1839 iops->prepare_message(i->msg, st->desc_index, i->priv);
1840 ret = ph->xops->do_xfer(ph, i->t);
1841 if (ret)
1842 break;
1843
1844 st->rx_len = i->t->rx.len;
1845 ret = iops->update_state(st, i->resp, i->priv);
1846 if (ret)
1847 break;
1848
1849 if (st->num_returned > st->max_resources - st->desc_index) {
1850 dev_err(ph->dev,
1851 "No. of resources can't exceed %d\n",
1852 st->max_resources);
1853 ret = -EINVAL;
1854 break;
1855 }
1856
1857 for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1858 st->loop_idx++) {
1859 ret = iops->process_response(ph, i->resp, st, i->priv);
1860 if (ret)
1861 goto out;
1862 }
1863
1864 st->desc_index += st->num_returned;
1865 ph->xops->reset_rx_to_maxsz(ph, i->t);
1866 /*
1867 * check for both returned and remaining to avoid infinite
1868 * loop due to buggy firmware
1869 */
1870 } while (st->num_returned && st->num_remaining);
1871
1872 out:
1873 /* Finalize and destroy iterator */
1874 ph->xops->xfer_put(ph, i->t);
1875 devm_kfree(ph->dev, i);
1876
1877 return ret;
1878 }
1879
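/*
 * Illustrative sketch (editor's addition): a typical multi-part query built
 * on top of the iterator helpers above. The message layouts and all
 * example_* names are hypothetical; only the shape of the three
 * scmi_iterator_ops callbacks and the init/run sequence reflects real usage.
 *
 *	struct example_msg { __le32 index; };
 *	struct example_resp { __le32 num; __le32 id[]; };
 *
 *	static void example_prepare(void *message, unsigned int desc_index,
 *				    const void *priv)
 *	{
 *		struct example_msg *msg = message;
 *
 *		msg->index = cpu_to_le32(desc_index);
 *	}
 *
 *	static int example_update(struct scmi_iterator_state *st,
 *				  const void *response, void *priv)
 *	{
 *		const struct example_resp *r = response;
 *
 *		st->num_returned = le32_to_cpu(r->num) & 0xfff;
 *		st->num_remaining = le32_to_cpu(r->num) >> 16;
 *		return 0;
 *	}
 *
 *	static int example_process(const struct scmi_protocol_handle *ph,
 *				   const void *response,
 *				   struct scmi_iterator_state *st, void *priv)
 *	{
 *		const struct example_resp *r = response;
 *
 *		store_id(priv, le32_to_cpu(r->id[st->loop_idx]));
 *		return 0;
 *	}
 *
 *	struct scmi_iterator_ops ops = {
 *		.prepare_message = example_prepare,
 *		.update_state = example_update,
 *		.process_response = example_process,
 *	};
 *	void *iter;
 *
 *	iter = ph->hops->iter_response_init(ph, &ops, max_domains,
 *					    EXAMPLE_LIST_CMD,
 *					    sizeof(struct example_msg), priv);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *	return ph->hops->iter_response_run(iter);
 */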
1880 struct scmi_msg_get_fc_info {
1881 __le32 domain;
1882 __le32 message_id;
1883 };
1884
1885 struct scmi_msg_resp_desc_fc {
1886 __le32 attr;
1887 #define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1888 #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1889 __le32 rate_limit;
1890 __le32 chan_addr_low;
1891 __le32 chan_addr_high;
1892 __le32 chan_size;
1893 __le32 db_addr_low;
1894 __le32 db_addr_high;
1895 __le32 db_set_lmask;
1896 __le32 db_set_hmask;
1897 __le32 db_preserve_lmask;
1898 __le32 db_preserve_hmask;
1899 };
1900
1901 #define QUIRK_PERF_FC_FORCE \
1902 ({ \
1903 if (pi->proto->id == SCMI_PROTOCOL_PERF && \
1904 message_id == 0x8 /* PERF_LEVEL_GET */) \
1905 attributes |= BIT(0); \
1906 })
1907
1908 static void
1909 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1910 u8 describe_id, u32 message_id, u32 valid_size,
1911 u32 domain, void __iomem **p_addr,
1912 struct scmi_fc_db_info **p_db, u32 *rate_limit)
1913 {
1914 int ret;
1915 u32 flags;
1916 u64 phys_addr;
1917 u32 attributes;
1918 u8 size;
1919 void __iomem *addr;
1920 struct scmi_xfer *t;
1921 struct scmi_fc_db_info *db = NULL;
1922 struct scmi_msg_get_fc_info *info;
1923 struct scmi_msg_resp_desc_fc *resp;
1924 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1925
1926 /* Check if the MSG_ID supports fastchannel */
1927 ret = scmi_protocol_msg_check(ph, message_id, &attributes);
1928 SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
1929 if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
1930 dev_dbg(ph->dev,
1931 "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
1932 pi->proto->id, message_id, domain, ret);
1933 return;
1934 }
1935
1936 if (!p_addr) {
1937 ret = -EINVAL;
1938 goto err_out;
1939 }
1940
1941 ret = ph->xops->xfer_get_init(ph, describe_id,
1942 sizeof(*info), sizeof(*resp), &t);
1943 if (ret)
1944 goto err_out;
1945
1946 info = t->tx.buf;
1947 info->domain = cpu_to_le32(domain);
1948 info->message_id = cpu_to_le32(message_id);
1949
1950 /*
1951 * Bail out on error leaving fc_info addresses zeroed; this includes
1952 * the case in which the requested domain/message_id does NOT support
1953 * fastchannels at all.
1954 */
1955 ret = ph->xops->do_xfer(ph, t);
1956 if (ret)
1957 goto err_xfer;
1958
1959 resp = t->rx.buf;
1960 flags = le32_to_cpu(resp->attr);
1961 size = le32_to_cpu(resp->chan_size);
1962 if (size != valid_size) {
1963 ret = -EINVAL;
1964 goto err_xfer;
1965 }
1966
1967 if (rate_limit)
1968 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1969
1970 phys_addr = le32_to_cpu(resp->chan_addr_low);
1971 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1972 addr = devm_ioremap(ph->dev, phys_addr, size);
1973 if (!addr) {
1974 ret = -EADDRNOTAVAIL;
1975 goto err_xfer;
1976 }
1977
1978 *p_addr = addr;
1979
1980 if (p_db && SUPPORTS_DOORBELL(flags)) {
1981 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1982 if (!db) {
1983 ret = -ENOMEM;
1984 goto err_db;
1985 }
1986
1987 size = 1 << DOORBELL_REG_WIDTH(flags);
1988 phys_addr = le32_to_cpu(resp->db_addr_low);
1989 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1990 addr = devm_ioremap(ph->dev, phys_addr, size);
1991 if (!addr) {
1992 ret = -EADDRNOTAVAIL;
1993 goto err_db_mem;
1994 }
1995
1996 db->addr = addr;
1997 db->width = size;
1998 db->set = le32_to_cpu(resp->db_set_lmask);
1999 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
2000 db->mask = le32_to_cpu(resp->db_preserve_lmask);
2001 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
2002
2003 *p_db = db;
2004 }
2005
2006 ph->xops->xfer_put(ph, t);
2007
2008 dev_dbg(ph->dev,
2009 "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
2010 pi->proto->id, message_id, domain);
2011
2012 return;
2013
2014 err_db_mem:
2015 devm_kfree(ph->dev, db);
2016
2017 err_db:
2018 *p_addr = NULL;
2019
2020 err_xfer:
2021 ph->xops->xfer_put(ph, t);
2022
2023 err_out:
2024 dev_warn(ph->dev,
2025 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
2026 pi->proto->id, message_id, domain, ret);
2027 }
2028
2029 #define SCMI_PROTO_FC_RING_DB(w) \
2030 do { \
2031 u##w val = 0; \
2032 \
2033 if (db->mask) \
2034 val = ioread##w(db->addr) & db->mask; \
2035 iowrite##w((u##w)db->set | val, db->addr); \
2036 } while (0)
2037
2038 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
2039 {
2040 if (!db || !db->addr)
2041 return;
2042
2043 if (db->width == 1)
2044 SCMI_PROTO_FC_RING_DB(8);
2045 else if (db->width == 2)
2046 SCMI_PROTO_FC_RING_DB(16);
2047 else if (db->width == 4)
2048 SCMI_PROTO_FC_RING_DB(32);
2049 else /* db->width == 8 */
2050 SCMI_PROTO_FC_RING_DB(64);
2051 }
2052
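/*
 * Illustrative sketch (editor's addition): a protocol wanting a fastchannel
 * for one of its commands would typically set it up at init time and then
 * ring the doorbell after each write to the shared channel area. The
 * describe/message ids, the desired_level value and the fc bookkeeping
 * structure are hypothetical.
 *
 *	struct example_fc {
 *		void __iomem *set_addr;
 *		struct scmi_fc_db_info *set_db;
 *		u32 rate_limit;
 *	} fc;
 *
 *	ph->hops->fastchannel_init(ph, EXAMPLE_DESCRIBE_FC_CMD,
 *				   EXAMPLE_LEVEL_SET_CMD, sizeof(u32),
 *				   domain_id, &fc.set_addr, &fc.set_db,
 *				   &fc.rate_limit);
 *
 *	if (fc.set_addr) {
 *		iowrite32(desired_level, fc.set_addr);
 *		ph->hops->fastchannel_db_ring(fc.set_db);
 *	}
 */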
2053 static const struct scmi_proto_helpers_ops helpers_ops = {
2054 .extended_name_get = scmi_common_extended_name_get,
2055 .get_max_msg_size = scmi_common_get_max_msg_size,
2056 .iter_response_init = scmi_iterator_init,
2057 .iter_response_run = scmi_iterator_run,
2058 .protocol_msg_check = scmi_protocol_msg_check,
2059 .fastchannel_init = scmi_common_fastchannel_init,
2060 .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
2061 };
2062
2063 /**
2064 * scmi_revision_area_get - Retrieve version memory area.
2065 *
2066 * @ph: A reference to the protocol handle.
2067 *
2068 * A helper to grab the version memory area reference during SCMI Base protocol
2069 * initialization.
2070 *
2071 * Return: A reference to the version memory area associated to the SCMI
2072 * instance underlying this protocol handle.
2073 */
2074 struct scmi_revision_info *
2075 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
2076 {
2077 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2078
2079 return pi->handle->version;
2080 }
2081
2082 /**
2083 * scmi_protocol_version_negotiate - Negotiate protocol version
2084 *
2085 * @ph: A reference to the protocol handle.
2086 *
2087 * A helper to negotiate a protocol version different from the latest
2088 * advertised as supported by the platform: on success, backward
2089 * compatibility is assured by the platform.
2090 *
2091 * Return: 0 on Success
2092 */
2093 static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
2094 {
2095 int ret;
2096 struct scmi_xfer *t;
2097 struct scmi_protocol_instance *pi = ph_to_pi(ph);
2098
2099 /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
2100 ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
2101 if (ret)
2102 return ret;
2103
2104 /* ... then attempt protocol version negotiation */
2105 ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
2106 sizeof(__le32), 0, &t);
2107 if (ret)
2108 return ret;
2109
2110 put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2111 ret = do_xfer(ph, t);
2112 if (!ret)
2113 pi->negotiated_version = pi->proto->supported_version;
2114
2115 xfer_put(ph, t);
2116
2117 return ret;
2118 }
2119
2120 /**
2121 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2122 * instance descriptor.
2123 * @info: The reference to the related SCMI instance.
2124 * @proto: The protocol descriptor.
2125 *
2126 * Allocate a new protocol instance descriptor, using the provided @proto
2127 * description, against the specified SCMI instance @info, and initialize it;
2128 * all resources management is handled via a dedicated per-protocol devres
2129 * group.
2130 *
2131 * Context: Assumes to be called with @protocols_mtx already acquired.
2132 * Return: A reference to a freshly allocated and initialized protocol instance
2133 * or ERR_PTR on failure. On failure the @proto reference is first
2134 * put using @scmi_protocol_put() before releasing the whole devres group.
2135 */
2136 static struct scmi_protocol_instance *
2137 scmi_alloc_init_protocol_instance(struct scmi_info *info,
2138 const struct scmi_protocol *proto)
2139 {
2140 int ret = -ENOMEM;
2141 void *gid;
2142 struct scmi_protocol_instance *pi;
2143 const struct scmi_handle *handle = &info->handle;
2144
2145 /* Protocol specific devres group */
2146 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2147 if (!gid) {
2148 scmi_protocol_put(proto);
2149 goto out;
2150 }
2151
2152 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2153 if (!pi)
2154 goto clean;
2155
2156 pi->gid = gid;
2157 pi->proto = proto;
2158 pi->handle = handle;
2159 pi->ph.dev = handle->dev;
2160 pi->ph.xops = &xfer_ops;
2161 pi->ph.hops = &helpers_ops;
2162 pi->ph.set_priv = scmi_set_protocol_priv;
2163 pi->ph.get_priv = scmi_get_protocol_priv;
2164 refcount_set(&pi->users, 1);
2165 /* proto->init is assured NON NULL by scmi_protocol_register */
2166 ret = pi->proto->instance_init(&pi->ph);
2167 if (ret)
2168 goto clean;
2169
2170 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2171 GFP_KERNEL);
2172 if (ret != proto->id)
2173 goto clean;
2174
2175 /*
2176 * Warn but ignore events registration errors since we do not want
2177 * to skip whole protocols if their notifications are messed up.
2178 */
2179 if (pi->proto->events) {
2180 ret = scmi_register_protocol_events(handle, pi->proto->id,
2181 &pi->ph,
2182 pi->proto->events);
2183 if (ret)
2184 dev_warn(handle->dev,
2185 "Protocol:%X - Events Registration Failed - err:%d\n",
2186 pi->proto->id, ret);
2187 }
2188
2189 devres_close_group(handle->dev, pi->gid);
2190 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2191
2192 if (pi->version > proto->supported_version) {
2193 ret = scmi_protocol_version_negotiate(&pi->ph);
2194 if (!ret) {
2195 dev_info(handle->dev,
2196 "Protocol 0x%X successfully negotiated version 0x%X\n",
2197 proto->id, pi->negotiated_version);
2198 } else {
2199 dev_warn(handle->dev,
2200 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
2201 pi->version, pi->proto->id);
2202 dev_warn(handle->dev,
2203 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
2204 pi->proto->supported_version);
2205 }
2206 }
2207
2208 return pi;
2209
2210 clean:
2211 /* Take care to put the protocol module's owner before releasing all */
2212 scmi_protocol_put(proto);
2213 devres_release_group(handle->dev, gid);
2214 out:
2215 return ERR_PTR(ret);
2216 }
2217
2218 /**
2219 * scmi_get_protocol_instance - Protocol initialization helper.
2220 * @handle: A reference to the SCMI platform instance.
2221 * @protocol_id: The protocol being requested.
2222 *
2223 * In case the required protocol has never been requested before for this
2224 * instance, allocate and initialize all the needed structures while handling
2225 * resource allocation with a dedicated per-protocol devres subgroup.
2226 *
2227 * Return: A reference to an initialized protocol instance or error on failure:
2228 * in particular returns -EPROBE_DEFER when the desired protocol could
2229 * NOT be found.
2230 */
2231 static struct scmi_protocol_instance * __must_check
2232 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
2233 {
2234 struct scmi_protocol_instance *pi;
2235 struct scmi_info *info = handle_to_scmi_info(handle);
2236
2237 mutex_lock(&info->protocols_mtx);
2238 pi = idr_find(&info->protocols, protocol_id);
2239
2240 if (pi) {
2241 refcount_inc(&pi->users);
2242 } else {
2243 const struct scmi_protocol *proto;
2244
2245 /* Fails if protocol not registered on bus */
2246 proto = scmi_protocol_get(protocol_id, &info->version);
2247 if (proto)
2248 pi = scmi_alloc_init_protocol_instance(info, proto);
2249 else
2250 pi = ERR_PTR(-EPROBE_DEFER);
2251 }
2252 mutex_unlock(&info->protocols_mtx);
2253
2254 return pi;
2255 }
2256
2257 /**
2258 * scmi_protocol_acquire - Protocol acquire
2259 * @handle: A reference to the SCMI platform instance.
2260 * @protocol_id: The protocol being requested.
2261 *
2262 * Register a new user for the requested protocol on the specified SCMI
2263 * platform instance, possibly triggering its initialization on first user.
2264 *
2265 * Return: 0 if protocol was acquired successfully.
2266 */
2267 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2268 {
2269 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2270 }
2271
2272 /**
2273 * scmi_protocol_release - Protocol de-initialization helper.
2274 * @handle: A reference to the SCMI platform instance.
2275 * @protocol_id: The protocol being requested.
2276 *
2277 * Remove one user for the specified protocol and triggers de-initialization
2278 * and resources de-allocation once the last user has gone.
2279 */
2280 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2281 {
2282 struct scmi_info *info = handle_to_scmi_info(handle);
2283 struct scmi_protocol_instance *pi;
2284
2285 mutex_lock(&info->protocols_mtx);
2286 pi = idr_find(&info->protocols, protocol_id);
2287 if (WARN_ON(!pi))
2288 goto out;
2289
2290 if (refcount_dec_and_test(&pi->users)) {
2291 void *gid = pi->gid;
2292
2293 if (pi->proto->events)
2294 scmi_deregister_protocol_events(handle, protocol_id);
2295
2296 if (pi->proto->instance_deinit)
2297 pi->proto->instance_deinit(&pi->ph);
2298
2299 idr_remove(&info->protocols, protocol_id);
2300
2301 scmi_protocol_put(pi->proto);
2302
2303 devres_release_group(handle->dev, gid);
2304 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2305 protocol_id);
2306 }
2307
2308 out:
2309 mutex_unlock(&info->protocols_mtx);
2310 }
2311
2312 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2313 u8 *prot_imp)
2314 {
2315 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2316 struct scmi_info *info = handle_to_scmi_info(pi->handle);
2317
2318 info->protocols_imp = prot_imp;
2319 }
2320
2321 static bool
2322 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2323 {
2324 int i;
2325 struct scmi_info *info = handle_to_scmi_info(handle);
2326 struct scmi_revision_info *rev = handle->version;
2327
2328 if (!info->protocols_imp)
2329 return false;
2330
2331 for (i = 0; i < rev->num_protocols; i++)
2332 if (info->protocols_imp[i] == prot_id)
2333 return true;
2334 return false;
2335 }
2336
2337 struct scmi_protocol_devres {
2338 const struct scmi_handle *handle;
2339 u8 protocol_id;
2340 };
2341
2342 static void scmi_devm_release_protocol(struct device *dev, void *res)
2343 {
2344 struct scmi_protocol_devres *dres = res;
2345
2346 scmi_protocol_release(dres->handle, dres->protocol_id);
2347 }
2348
2349 static struct scmi_protocol_instance __must_check *
2350 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2351 {
2352 struct scmi_protocol_instance *pi;
2353 struct scmi_protocol_devres *dres;
2354
2355 dres = devres_alloc(scmi_devm_release_protocol,
2356 sizeof(*dres), GFP_KERNEL);
2357 if (!dres)
2358 return ERR_PTR(-ENOMEM);
2359
2360 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2361 if (IS_ERR(pi)) {
2362 devres_free(dres);
2363 return pi;
2364 }
2365
2366 dres->handle = sdev->handle;
2367 dres->protocol_id = protocol_id;
2368 devres_add(&sdev->dev, dres);
2369
2370 return pi;
2371 }
2372
2373 /**
2374 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2375 * @sdev: A reference to an scmi_device whose embedded struct device is to
2376 * be used for devres accounting.
2377 * @protocol_id: The protocol being requested.
2378 * @ph: A pointer reference used to pass back the associated protocol handle.
2379 *
2380 * Get hold of a protocol accounting for its usage, possibly triggering its
2381 * initialization, and returning the protocol specific operations and related
2382 * protocol handle which will be used as first argument in most of the
2383 * protocols operations methods.
2384 * Being a devres based managed method, protocol hold will be automatically
2385 * released, and possibly de-initialized on last user, once the SCMI driver
2386 * owning the scmi_device is unbound from it.
2387 *
2388 * Return: A reference to the requested protocol operations or error.
2389 * Must be checked for errors by caller.
2390 */
2391 static const void __must_check *
2392 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2393 struct scmi_protocol_handle **ph)
2394 {
2395 struct scmi_protocol_instance *pi;
2396
2397 if (!ph)
2398 return ERR_PTR(-EINVAL);
2399
2400 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2401 if (IS_ERR(pi))
2402 return pi;
2403
2404 *ph = &pi->ph;
2405
2406 return pi->proto->ops;
2407 }
2408
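/*
 * Illustrative sketch (editor's addition): how an SCMI client driver bound
 * on the SCMI bus would typically use the devres managed getter above from
 * its probe callback; the clock protocol is just an arbitrary example here
 * and the function name is hypothetical.
 *
 *	static int example_scmi_clk_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_clk_proto_ops *clk_ops;
 *		struct scmi_protocol_handle *ph;
 *
 *		if (!sdev->handle)
 *			return -ENODEV;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *							  SCMI_PROTOCOL_CLOCK,
 *							  &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *
 *		return 0;
 *	}
 *
 * From here on clk_ops and ph are usable for protocol requests; the hold is
 * dropped automatically once the driver is unbound.
 */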
2409 /**
2410 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2411 * @sdev: A reference to an scmi_device whose embedded struct device is to
2412 * be used for devres accounting.
2413 * @protocol_id: The protocol being requested.
2414 *
2415 * Get hold of a protocol accounting for its usage, possibly triggering its
2416 * initialization but without getting access to its protocol specific operations
2417 * and handle.
2418 *
2419 * Being a devres based managed method, protocol hold will be automatically
2420 * released, and possibly de-initialized on last user, once the SCMI driver
2421 * owning the scmi_device is unbound from it.
2422 *
2423 * Return: 0 on SUCCESS
2424 */
2425 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2426 u8 protocol_id)
2427 {
2428 struct scmi_protocol_instance *pi;
2429
2430 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2431 if (IS_ERR(pi))
2432 return PTR_ERR(pi);
2433
2434 return 0;
2435 }
2436
2437 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2438 {
2439 struct scmi_protocol_devres *dres = res;
2440
2441 if (WARN_ON(!dres || !data))
2442 return 0;
2443
2444 return dres->protocol_id == *((u8 *)data);
2445 }
2446
2447 /**
2448 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2449 * @sdev: A reference to an scmi_device whose embedded struct device is to
2450 * be used for devres accounting.
2451 * @protocol_id: The protocol being requested.
2452 *
2453 * Explicitly release a protocol hold previously obtained calling the above
2454 * @scmi_devm_protocol_get.
2455 */
2456 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2457 {
2458 int ret;
2459
2460 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2461 scmi_devm_protocol_match, &protocol_id);
2462 WARN_ON(ret);
2463 }
2464
2465 /**
2466 * scmi_is_transport_atomic - Method to check if underlying transport for an
2467 * SCMI instance is configured as atomic.
2468 *
2469 * @handle: A reference to the SCMI platform instance.
2470 * @atomic_threshold: An optional return value for the system wide currently
2471 * configured threshold for atomic operations.
2472 *
2473 * Return: True if transport is configured as atomic
2474 */
2475 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2476 unsigned int *atomic_threshold)
2477 {
2478 bool ret;
2479 struct scmi_info *info = handle_to_scmi_info(handle);
2480
2481 ret = info->desc->atomic_enabled &&
2482 is_transport_polling_capable(info->desc);
2483 if (ret && atomic_threshold)
2484 *atomic_threshold = info->desc->atomic_threshold;
2485
2486 return ret;
2487 }
2488
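/*
 * Illustrative sketch (editor's addition): client drivers can use the check
 * above to decide whether they may service requests from atomic context,
 * e.g. when registering clocks supporting atomic enable/disable. The
 * threshold comparison below is only one possible policy and the
 * resource_latency_us / enable_atomic_ops names are hypothetical.
 *
 *	unsigned int atomic_threshold_us;
 *	bool atomic_capable;
 *
 *	atomic_capable = handle->is_transport_atomic(handle,
 *						     &atomic_threshold_us);
 *	if (atomic_capable && resource_latency_us <= atomic_threshold_us)
 *		enable_atomic_ops = true;
 */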
2489 /**
2490 * scmi_handle_get() - Get the SCMI handle for a device
2491 *
2492 * @dev: pointer to device for which we want SCMI handle
2493 *
2494 * NOTE: The function does not track individual clients of the framework
2495 * and is expected to be maintained by the caller of the SCMI protocol library.
2496 * scmi_handle_put() must be balanced with a successful scmi_handle_get().
2497 *
2498 * Return: pointer to handle if successful, NULL on error
2499 */
2500 static struct scmi_handle *scmi_handle_get(struct device *dev)
2501 {
2502 struct list_head *p;
2503 struct scmi_info *info;
2504 struct scmi_handle *handle = NULL;
2505
2506 mutex_lock(&scmi_list_mutex);
2507 list_for_each(p, &scmi_list) {
2508 info = list_entry(p, struct scmi_info, node);
2509 if (dev->parent == info->dev) {
2510 info->users++;
2511 handle = &info->handle;
2512 break;
2513 }
2514 }
2515 mutex_unlock(&scmi_list_mutex);
2516
2517 return handle;
2518 }
2519
2520 /**
2521 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2522 *
2523 * @handle: handle acquired by scmi_handle_get
2524 *
2525 * NOTE: The function does not track individual clients of the framework
2526 * and is expected to be maintained by the caller of the SCMI protocol library.
2527 * scmi_handle_put() must be balanced with a successful scmi_handle_get().
2528 *
2529 * Return: 0 if the handle was successfully released,
2530 * -EINVAL if a NULL handle was passed.
2531 */
2532 static int scmi_handle_put(const struct scmi_handle *handle)
2533 {
2534 struct scmi_info *info;
2535
2536 if (!handle)
2537 return -EINVAL;
2538
2539 info = handle_to_scmi_info(handle);
2540 mutex_lock(&scmi_list_mutex);
2541 if (!WARN_ON(!info->users))
2542 info->users--;
2543 mutex_unlock(&scmi_list_mutex);
2544
2545 return 0;
2546 }
2547
2548 static void scmi_device_link_add(struct device *consumer,
2549 struct device *supplier)
2550 {
2551 struct device_link *link;
2552
2553 link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2554
2555 WARN_ON(!link);
2556 }
2557
2558 static void scmi_set_handle(struct scmi_device *scmi_dev)
2559 {
2560 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2561 if (scmi_dev->handle)
2562 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2563 }
2564
2565 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2566 struct scmi_xfers_info *info)
2567 {
2568 int i;
2569 struct scmi_xfer *xfer;
2570 struct device *dev = sinfo->dev;
2571 const struct scmi_desc *desc = sinfo->desc;
2572
2573 /* Pre-allocated messages, no more than what hdr.seq can support */
2574 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2575 dev_err(dev,
2576 "Invalid maximum messages %d, not in range [1 - %lu]\n",
2577 info->max_msg, MSG_TOKEN_MAX);
2578 return -EINVAL;
2579 }
2580
2581 hash_init(info->pending_xfers);
2582
2583 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2584 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2585 GFP_KERNEL);
2586 if (!info->xfer_alloc_table)
2587 return -ENOMEM;
2588
2589 /*
2590 * Preallocate a number of xfers equal to max inflight messages,
2591 * pre-initialize the buffer pointer to pre-allocated buffers and
2592 * attach all of them to the free list
2593 */
2594 INIT_HLIST_HEAD(&info->free_xfers);
2595 for (i = 0; i < info->max_msg; i++) {
2596 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2597 if (!xfer)
2598 return -ENOMEM;
2599
2600 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2601 GFP_KERNEL);
2602 if (!xfer->rx.buf)
2603 return -ENOMEM;
2604
2605 xfer->tx.buf = xfer->rx.buf;
2606 init_completion(&xfer->done);
2607 spin_lock_init(&xfer->lock);
2608
2609 /* Add initialized xfer to the free list */
2610 hlist_add_head(&xfer->node, &info->free_xfers);
2611 }
2612
2613 spin_lock_init(&info->xfer_lock);
2614
2615 return 0;
2616 }
2617
2618 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2619 {
2620 const struct scmi_desc *desc = sinfo->desc;
2621
2622 if (!desc->ops->get_max_msg) {
2623 sinfo->tx_minfo.max_msg = desc->max_msg;
2624 sinfo->rx_minfo.max_msg = desc->max_msg;
2625 } else {
2626 struct scmi_chan_info *base_cinfo;
2627
2628 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2629 if (!base_cinfo)
2630 return -EINVAL;
2631 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2632
2633 /* RX channel is optional so can be skipped */
2634 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2635 if (base_cinfo)
2636 sinfo->rx_minfo.max_msg =
2637 desc->ops->get_max_msg(base_cinfo);
2638 }
2639
2640 return 0;
2641 }
2642
2643 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2644 {
2645 int ret;
2646
2647 ret = scmi_channels_max_msg_configure(sinfo);
2648 if (ret)
2649 return ret;
2650
2651 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2652 if (!ret && !idr_is_empty(&sinfo->rx_idr))
2653 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2654
2655 return ret;
2656 }
2657
2658 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2659 int prot_id, bool tx)
2660 {
2661 int ret, idx;
2662 char name[32];
2663 struct scmi_chan_info *cinfo;
2664 struct idr *idr;
2665 struct scmi_device *tdev = NULL;
2666
2667 /* Transmit channel is first entry i.e. index 0 */
2668 idx = tx ? 0 : 1;
2669 idr = tx ? &info->tx_idr : &info->rx_idr;
2670
2671 if (!info->desc->ops->chan_available(of_node, idx)) {
2672 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2673 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2674 return -EINVAL;
2675 goto idr_alloc;
2676 }
2677
2678 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2679 if (!cinfo)
2680 return -ENOMEM;
2681
2682 cinfo->is_p2a = !tx;
2683 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2684 cinfo->max_msg_size = info->desc->max_msg_size;
2685
2686 /* Create a unique name for this transport device */
2687 snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2688 idx ? "rx" : "tx", prot_id);
2689 /* Create a uniquely named, dedicated transport device for this chan */
2690 tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2691 if (!tdev) {
2692 dev_err(info->dev,
2693 "failed to create transport device (%s)\n", name);
2694 devm_kfree(info->dev, cinfo);
2695 return -EINVAL;
2696 }
2697 of_node_get(of_node);
2698
2699 cinfo->id = prot_id;
2700 cinfo->dev = &tdev->dev;
2701 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2702 if (ret) {
2703 of_node_put(of_node);
2704 scmi_device_destroy(info->dev, prot_id, name);
2705 devm_kfree(info->dev, cinfo);
2706 return ret;
2707 }
2708
2709 if (tx && is_polling_required(cinfo, info->desc)) {
2710 if (is_transport_polling_capable(info->desc))
2711 dev_info(&tdev->dev,
2712 "Enabled polling mode TX channel - prot_id:%d\n",
2713 prot_id);
2714 else
2715 dev_warn(&tdev->dev,
2716 "Polling mode NOT supported by transport.\n");
2717 }
2718
2719 idr_alloc:
2720 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2721 if (ret != prot_id) {
2722 dev_err(info->dev,
2723 "unable to allocate SCMI idr slot err %d\n", ret);
2724 /* Destroy channel and device only if created by this call. */
2725 if (tdev) {
2726 of_node_put(of_node);
2727 scmi_device_destroy(info->dev, prot_id, name);
2728 devm_kfree(info->dev, cinfo);
2729 }
2730 return ret;
2731 }
2732
2733 cinfo->handle = &info->handle;
2734 return 0;
2735 }
2736
2737 static inline int
2738 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2739 int prot_id)
2740 {
2741 int ret = scmi_chan_setup(info, of_node, prot_id, true);
2742
2743 if (!ret) {
2744 /* Rx is optional, report only memory errors */
2745 ret = scmi_chan_setup(info, of_node, prot_id, false);
2746 if (ret && ret != -ENOMEM)
2747 ret = 0;
2748 }
2749
2750 if (ret)
2751 dev_err(info->dev,
2752 "failed to setup channel for protocol:0x%X\n", prot_id);
2753
2754 return ret;
2755 }
2756
2757 /**
2758 * scmi_channels_setup - Helper to initialize all required channels
2759 *
2760 * @info: The SCMI instance descriptor.
2761 *
2762 * Initialize all the channels found described in the DT against the underlying
2763 * configured transport using custom defined dedicated devices instead of
2764 * borrowing devices from the SCMI drivers; this way channels are initialized
2765 * upfront during core SCMI stack probing and are no longer coupled with SCMI
2766 * devices used by SCMI drivers.
2767 *
2768 * Note that, even though a pair of TX/RX channels is associated to each
2769 * protocol defined in the DT, a distinct freshly initialized channel is
2770 * created only if the DT node for the protocol at hand describes a dedicated
2771 * channel: in all the other cases the common BASE protocol channel is reused.
2772 *
2773 * Return: 0 on Success
2774 */
2775 static int scmi_channels_setup(struct scmi_info *info)
2776 {
2777 int ret;
2778 struct device_node *top_np = info->dev->of_node;
2779
2780 /* Initialize a common generic channel at first */
2781 ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2782 if (ret)
2783 return ret;
2784
2785 for_each_available_child_of_node_scoped(top_np, child) {
2786 u32 prot_id;
2787
2788 if (of_property_read_u32(child, "reg", &prot_id))
2789 continue;
2790
2791 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2792 dev_err(info->dev,
2793 "Out of range protocol %d\n", prot_id);
2794
2795 ret = scmi_txrx_setup(info, child, prot_id);
2796 if (ret)
2797 return ret;
2798 }
2799
2800 return 0;
2801 }
2802
2803 static int scmi_chan_destroy(int id, void *p, void *idr)
2804 {
2805 struct scmi_chan_info *cinfo = p;
2806
2807 if (cinfo->dev) {
2808 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2809 struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2810
2811 of_node_put(cinfo->dev->of_node);
2812 scmi_device_destroy(info->dev, id, sdev->name);
2813 cinfo->dev = NULL;
2814 }
2815
2816 idr_remove(idr, id);
2817
2818 return 0;
2819 }
2820
2821 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2822 {
2823 /* At first free all channels at the transport layer ... */
2824 idr_for_each(idr, info->desc->ops->chan_free, idr);
2825
2826 /* ...then destroy all underlying devices */
2827 idr_for_each(idr, scmi_chan_destroy, idr);
2828
2829 idr_destroy(idr);
2830 }
2831
2832 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2833 {
2834 scmi_cleanup_channels(info, &info->tx_idr);
2835
2836 scmi_cleanup_channels(info, &info->rx_idr);
2837 }
2838
2839 static int scmi_bus_notifier(struct notifier_block *nb,
2840 unsigned long action, void *data)
2841 {
2842 struct scmi_info *info = bus_nb_to_scmi_info(nb);
2843 struct scmi_device *sdev = to_scmi_dev(data);
2844
2845 /* Skip devices of different SCMI instances */
2846 if (sdev->dev.parent != info->dev)
2847 return NOTIFY_DONE;
2848
2849 switch (action) {
2850 case BUS_NOTIFY_BIND_DRIVER:
2851 /* setup handle now as the transport is ready */
2852 scmi_set_handle(sdev);
2853 break;
2854 case BUS_NOTIFY_UNBOUND_DRIVER:
2855 scmi_handle_put(sdev->handle);
2856 sdev->handle = NULL;
2857 break;
2858 default:
2859 return NOTIFY_DONE;
2860 }
2861
2862 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2863 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2864 "about to be BOUND." : "UNBOUND.");
2865
2866 return NOTIFY_OK;
2867 }
2868
2869 static int scmi_device_request_notifier(struct notifier_block *nb,
2870 unsigned long action, void *data)
2871 {
2872 struct device_node *np;
2873 struct scmi_device_id *id_table = data;
2874 struct scmi_info *info = req_nb_to_scmi_info(nb);
2875
2876 np = idr_find(&info->active_protocols, id_table->protocol_id);
2877 if (!np)
2878 return NOTIFY_DONE;
2879
2880 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2881 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2882 id_table->name, id_table->protocol_id);
2883
2884 switch (action) {
2885 case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2886 scmi_create_protocol_devices(np, info, id_table->protocol_id,
2887 id_table->name);
2888 break;
2889 case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2890 scmi_destroy_protocol_devices(info, id_table->protocol_id,
2891 id_table->name);
2892 break;
2893 default:
2894 return NOTIFY_DONE;
2895 }
2896
2897 return NOTIFY_OK;
2898 }
2899
2900 static const char * const dbg_counter_strs[] = {
2901 "sent_ok",
2902 "sent_fail",
2903 "sent_fail_polling_unsupported",
2904 "sent_fail_channel_not_found",
2905 "response_ok",
2906 "notification_ok",
2907 "delayed_response_ok",
2908 "xfers_response_timeout",
2909 "xfers_response_polled_timeout",
2910 "response_polled_ok",
2911 "err_msg_unexpected",
2912 "err_msg_invalid",
2913 "err_msg_nomem",
2914 "err_protocol",
2915 };
2916
2917 static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
2918 size_t count, loff_t *ppos)
2919 {
2920 struct scmi_debug_info *dbg = filp->private_data;
2921
2922 for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
2923 atomic_set(&dbg->counters[i], 0);
2924
2925 return count;
2926 }
2927
2928 static const struct file_operations fops_reset_counts = {
2929 .owner = THIS_MODULE,
2930 .open = simple_open,
2931 .write = reset_all_on_write,
2932 };
2933
2934 static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
2935 struct dentry *trans)
2936 {
2937 struct dentry *counters;
2938 int idx;
2939
2940 counters = debugfs_create_dir("counters", trans);
2941
2942 for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
2943 debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
2944 &dbg->counters[idx]);
2945
2946 debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
2947 }
2948
2949 static void scmi_debugfs_common_cleanup(void *d)
2950 {
2951 struct scmi_debug_info *dbg = d;
2952
2953 if (!dbg)
2954 return;
2955
2956 debugfs_remove_recursive(dbg->top_dentry);
2957 kfree(dbg->name);
2958 kfree(dbg->type);
2959 }
2960
2961 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2962 {
2963 char top_dir[16];
2964 struct dentry *trans, *top_dentry;
2965 struct scmi_debug_info *dbg;
2966 const char *c_ptr = NULL;
2967
2968 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2969 if (!dbg)
2970 return NULL;
2971
2972 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2973 if (!dbg->name) {
2974 devm_kfree(info->dev, dbg);
2975 return NULL;
2976 }
2977
2978 of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2979 dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2980 if (!dbg->type) {
2981 kfree(dbg->name);
2982 devm_kfree(info->dev, dbg);
2983 return NULL;
2984 }
2985
2986 snprintf(top_dir, 16, "%d", info->id);
2987 top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2988 trans = debugfs_create_dir("transport", top_dentry);
2989
2990 dbg->is_atomic = info->desc->atomic_enabled &&
2991 is_transport_polling_capable(info->desc);
2992
2993 debugfs_create_str("instance_name", 0400, top_dentry,
2994 (char **)&dbg->name);
2995
2996 debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2997 (u32 *)&info->desc->atomic_threshold);
2998
2999 debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
3000
3001 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
3002
3003 debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
3004 (u32 *)&info->desc->max_rx_timeout_ms);
3005
3006 debugfs_create_u32("max_msg_size", 0400, trans,
3007 (u32 *)&info->desc->max_msg_size);
3008
3009 debugfs_create_u32("tx_max_msg", 0400, trans,
3010 (u32 *)&info->tx_minfo.max_msg);
3011
3012 debugfs_create_u32("rx_max_msg", 0400, trans,
3013 (u32 *)&info->rx_minfo.max_msg);
3014
3015 if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
3016 scmi_debugfs_counters_setup(dbg, trans);
3017
3018 dbg->top_dentry = top_dentry;
3019
3020 if (devm_add_action_or_reset(info->dev,
3021 scmi_debugfs_common_cleanup, dbg))
3022 return NULL;
3023
3024 return dbg;
3025 }
3026
3027 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
3028 {
3029 int id, num_chans = 0, ret = 0;
3030 struct scmi_chan_info *cinfo;
3031 u8 channels[SCMI_MAX_CHANNELS] = {};
3032 DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
3033
3034 if (!info->dbg)
3035 return -EINVAL;
3036
3037 /* Enumerate all channels to collect their ids */
3038 idr_for_each_entry(&info->tx_idr, cinfo, id) {
3039 /*
3040 * Cannot happen, but be defensive.
3041 * Zero as num_chans is ok, warn and carry on.
3042 */
3043 if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
3044 dev_warn(info->dev,
3045 "SCMI RAW - Error enumerating channels\n");
3046 break;
3047 }
3048
3049 if (!test_bit(cinfo->id, protos)) {
3050 channels[num_chans++] = cinfo->id;
3051 set_bit(cinfo->id, protos);
3052 }
3053 }
3054
3055 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3056 info->id, channels, num_chans,
3057 info->desc, info->tx_minfo.max_msg);
3058 if (IS_ERR(info->raw)) {
3059 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
3060 ret = PTR_ERR(info->raw);
3061 info->raw = NULL;
3062 }
3063
3064 return ret;
3065 }
3066
3067 static const struct scmi_desc *scmi_transport_setup(struct device *dev)
3068 {
3069 struct scmi_transport *trans;
3070 int ret;
3071
3072 trans = dev_get_platdata(dev);
3073 if (!trans || !trans->supplier || !trans->core_ops)
3074 return NULL;
3075
3076 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3077 dev_err(dev,
3078 "Adding link to supplier transport device failed\n");
3079 return NULL;
3080 }
3081
3082 /* Provide core transport ops */
3083 *trans->core_ops = &scmi_trans_core_ops;
3084
3085 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3086
3087 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3088 &trans->desc.max_rx_timeout_ms);
3089 if (ret && ret != -EINVAL)
3090 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3091
3092 ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3093 &trans->desc.max_msg_size);
3094 if (ret && ret != -EINVAL)
3095 dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3096
3097 ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3098 &trans->desc.max_msg);
3099 if (ret && ret != -EINVAL)
3100 dev_err(dev, "Malformed arm,max-msg DT property.\n");
3101
3102 dev_info(dev,
3103 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3104 trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
3105 trans->desc.max_msg);
3106
3107 /* System wide atomic threshold for atomic ops .. if any */
3108 if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3109 &trans->desc.atomic_threshold))
3110 dev_info(dev,
3111 "SCMI System wide atomic threshold set to %u us\n",
3112 trans->desc.atomic_threshold);
3113
3114 return &trans->desc;
3115 }
3116
3117 static void scmi_enable_matching_quirks(struct scmi_info *info)
3118 {
3119 struct scmi_revision_info *rev = &info->version;
3120
3121 dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
3122 rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
3123
3124 /* Enable applicable quirks */
3125 scmi_quirks_enable(info->dev, rev->vendor_id,
3126 rev->sub_vendor_id, rev->impl_ver);
3127 }
3128
3129 static int scmi_probe(struct platform_device *pdev)
3130 {
3131 int ret;
3132 char *err_str = "probe failure\n";
3133 struct scmi_handle *handle;
3134 const struct scmi_desc *desc;
3135 struct scmi_info *info;
3136 bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
3137 struct device *dev = &pdev->dev;
3138 struct device_node *child, *np = dev->of_node;
3139
3140 desc = scmi_transport_setup(dev);
3141 if (!desc) {
3142 err_str = "transport invalid\n";
3143 ret = -EINVAL;
3144 goto out_err;
3145 }
3146
3147 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3148 if (!info)
3149 return -ENOMEM;
3150
3151 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3152 if (info->id < 0)
3153 return info->id;
3154
3155 info->dev = dev;
3156 info->desc = desc;
3157 info->bus_nb.notifier_call = scmi_bus_notifier;
3158 info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3159 INIT_LIST_HEAD(&info->node);
3160 idr_init(&info->protocols);
3161 mutex_init(&info->protocols_mtx);
3162 idr_init(&info->active_protocols);
3163 mutex_init(&info->devreq_mtx);
3164
3165 platform_set_drvdata(pdev, info);
3166 idr_init(&info->tx_idr);
3167 idr_init(&info->rx_idr);
3168
3169 handle = &info->handle;
3170 handle->dev = info->dev;
3171 handle->version = &info->version;
3172 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3173 handle->devm_protocol_get = scmi_devm_protocol_get;
3174 handle->devm_protocol_put = scmi_devm_protocol_put;
3175 handle->is_transport_atomic = scmi_is_transport_atomic;
3176
3177 /* Setup all channels described in the DT at first */
3178 ret = scmi_channels_setup(info);
3179 if (ret) {
3180 err_str = "failed to setup channels\n";
3181 goto clear_ida;
3182 }
3183
3184 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3185 if (ret) {
3186 err_str = "failed to register bus notifier\n";
3187 goto clear_txrx_setup;
3188 }
3189
3190 ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
3191 &info->dev_req_nb);
3192 if (ret) {
3193 err_str = "failed to register device notifier\n";
3194 goto clear_bus_notifier;
3195 }
3196
3197 ret = scmi_xfer_info_init(info);
3198 if (ret) {
3199 err_str = "failed to init xfers pool\n";
3200 goto clear_dev_req_notifier;
3201 }
3202
3203 if (scmi_top_dentry) {
3204 info->dbg = scmi_debugfs_common_setup(info);
3205 if (!info->dbg)
3206 dev_warn(dev, "Failed to setup SCMI debugfs.\n");
3207
3208 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
3209 ret = scmi_debugfs_raw_mode_setup(info);
3210 if (!coex) {
3211 if (ret)
3212 goto clear_dev_req_notifier;
3213
3214 /* Bail out anyway when coex disabled. */
3215 return 0;
3216 }
3217
3218 /* Coex enabled, carry on in any case. */
3219 dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
3220 }
3221 }
3222
3223 if (scmi_notification_init(handle))
3224 dev_err(dev, "SCMI Notifications NOT available.\n");
3225
3226 if (info->desc->atomic_enabled &&
3227 !is_transport_polling_capable(info->desc))
3228 dev_err(dev,
3229 "Transport is not polling capable. Atomic mode not supported.\n");
3230
3231 /*
3232 * Trigger SCMI Base protocol initialization.
3233 * It's mandatory and won't be ever released/deinit until the
3234 * SCMI stack is shutdown/unloaded as a whole.
3235 */
3236 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
3237 if (ret) {
3238 err_str = "unable to communicate with SCMI\n";
3239 if (coex) {
3240 dev_err(dev, "%s", err_str);
3241 return 0;
3242 }
3243 goto notification_exit;
3244 }
3245
3246 mutex_lock(&scmi_list_mutex);
3247 list_add_tail(&info->node, &scmi_list);
3248 mutex_unlock(&scmi_list_mutex);
3249
3250 scmi_enable_matching_quirks(info);
3251
3252 for_each_available_child_of_node(np, child) {
3253 u32 prot_id;
3254
3255 if (of_property_read_u32(child, "reg", &prot_id))
3256 continue;
3257
3258 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
3259 dev_err(dev, "Out of range protocol %d\n", prot_id);
3260
3261 if (!scmi_is_protocol_implemented(handle, prot_id)) {
3262 dev_err(dev, "SCMI protocol %d not implemented\n",
3263 prot_id);
3264 continue;
3265 }
3266
3267 /*
3268 * Save this valid DT protocol descriptor amongst
3269 * @active_protocols for this SCMI instance.
3270 */
3271 ret = idr_alloc(&info->active_protocols, child,
3272 prot_id, prot_id + 1, GFP_KERNEL);
3273 if (ret != prot_id) {
3274 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
3275 prot_id);
3276 continue;
3277 }
3278
3279 of_node_get(child);
3280 scmi_create_protocol_devices(child, info, prot_id, NULL);
3281 }
3282
3283 return 0;
3284
3285 notification_exit:
3286 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3287 scmi_raw_mode_cleanup(info->raw);
3288 scmi_notification_exit(&info->handle);
3289 clear_dev_req_notifier:
3290 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3291 &info->dev_req_nb);
3292 clear_bus_notifier:
3293 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3294 clear_txrx_setup:
3295 scmi_cleanup_txrx_channels(info);
3296 clear_ida:
3297 ida_free(&scmi_id, info->id);
3298
3299 out_err:
3300 return dev_err_probe(dev, ret, "%s", err_str);
3301 }
3302
3303 static void scmi_remove(struct platform_device *pdev)
3304 {
3305 int id;
3306 struct scmi_info *info = platform_get_drvdata(pdev);
3307 struct device_node *child;
3308
3309 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3310 scmi_raw_mode_cleanup(info->raw);
3311
3312 mutex_lock(&scmi_list_mutex);
3313 if (info->users)
3314 dev_warn(&pdev->dev,
3315 "Still active SCMI users will be forcibly unbound.\n");
3316 list_del(&info->node);
3317 mutex_unlock(&scmi_list_mutex);
3318
3319 scmi_notification_exit(&info->handle);
3320
3321 mutex_lock(&info->protocols_mtx);
3322 idr_destroy(&info->protocols);
3323 mutex_unlock(&info->protocols_mtx);
3324
3325 idr_for_each_entry(&info->active_protocols, child, id)
3326 of_node_put(child);
3327 idr_destroy(&info->active_protocols);
3328
3329 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3330 &info->dev_req_nb);
3331 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3332
3333 /* Safe to free channels since no more users */
3334 scmi_cleanup_txrx_channels(info);
3335
3336 ida_free(&scmi_id, info->id);
3337 }
3338
3339 static ssize_t protocol_version_show(struct device *dev,
3340 struct device_attribute *attr, char *buf)
3341 {
3342 struct scmi_info *info = dev_get_drvdata(dev);
3343
3344 return sprintf(buf, "%u.%u\n", info->version.major_ver,
3345 info->version.minor_ver);
3346 }
3347 static DEVICE_ATTR_RO(protocol_version);
3348
3349 static ssize_t firmware_version_show(struct device *dev,
3350 struct device_attribute *attr, char *buf)
3351 {
3352 struct scmi_info *info = dev_get_drvdata(dev);
3353
3354 return sprintf(buf, "0x%x\n", info->version.impl_ver);
3355 }
3356 static DEVICE_ATTR_RO(firmware_version);
3357
3358 static ssize_t vendor_id_show(struct device *dev,
3359 struct device_attribute *attr, char *buf)
3360 {
3361 struct scmi_info *info = dev_get_drvdata(dev);
3362
3363 return sprintf(buf, "%s\n", info->version.vendor_id);
3364 }
3365 static DEVICE_ATTR_RO(vendor_id);
3366
3367 static ssize_t sub_vendor_id_show(struct device *dev,
3368 struct device_attribute *attr, char *buf)
3369 {
3370 struct scmi_info *info = dev_get_drvdata(dev);
3371
3372 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3373 }
3374 static DEVICE_ATTR_RO(sub_vendor_id);
3375
3376 static struct attribute *versions_attrs[] = {
3377 &dev_attr_firmware_version.attr,
3378 &dev_attr_protocol_version.attr,
3379 &dev_attr_vendor_id.attr,
3380 &dev_attr_sub_vendor_id.attr,
3381 NULL,
3382 };
3383 ATTRIBUTE_GROUPS(versions);
3384
3385 static struct platform_driver scmi_driver = {
3386 .driver = {
3387 .name = "arm-scmi",
3388 .suppress_bind_attrs = true,
3389 .dev_groups = versions_groups,
3390 },
3391 .probe = scmi_probe,
3392 .remove = scmi_remove,
3393 };
3394
3395 static struct dentry *scmi_debugfs_init(void)
3396 {
3397 struct dentry *d;
3398
3399 d = debugfs_create_dir("scmi", NULL);
3400 if (IS_ERR(d)) {
3401 pr_err("Could NOT create SCMI top dentry.\n");
3402 return NULL;
3403 }
3404
3405 return d;
3406 }
3407
3408 static int __init scmi_driver_init(void)
3409 {
3410 scmi_quirks_initialize();
3411
3412 /* Bail out if no SCMI transport was configured */
3413 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3414 return -EINVAL;
3415
3416 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
3417 scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
3418
3419 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
3420 scmi_trans_core_ops.msg = scmi_message_operations_get();
3421
3422 if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3423 scmi_top_dentry = scmi_debugfs_init();
3424
3425 scmi_base_register();
3426
3427 scmi_clock_register();
3428 scmi_perf_register();
3429 scmi_power_register();
3430 scmi_reset_register();
3431 scmi_sensors_register();
3432 scmi_voltage_register();
3433 scmi_system_register();
3434 scmi_powercap_register();
3435 scmi_pinctrl_register();
3436
3437 return platform_driver_register(&scmi_driver);
3438 }
3439 module_init(scmi_driver_init);
3440
3441 static void __exit scmi_driver_exit(void)
3442 {
3443 scmi_base_unregister();
3444
3445 scmi_clock_unregister();
3446 scmi_perf_unregister();
3447 scmi_power_unregister();
3448 scmi_reset_unregister();
3449 scmi_sensors_unregister();
3450 scmi_voltage_unregister();
3451 scmi_system_unregister();
3452 scmi_powercap_unregister();
3453 scmi_pinctrl_unregister();
3454
3455 platform_driver_unregister(&scmi_driver);
3456
3457 debugfs_remove_recursive(scmi_top_dentry);
3458 }
3459 module_exit(scmi_driver_exit);
3460
3461 MODULE_ALIAS("platform:arm-scmi");
3462 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3463 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3464 MODULE_LICENSE("GPL v2");
3465