// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "common.h"
#include "notify.h"

#include "raw_mode.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

static DEFINE_IDA(scmi_id);

static DEFINE_XARRAY(scmi_protocols);

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static struct dentry *scmi_top_dentry;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @version: Protocol version supported by the platform as detected at runtime.
 * @negotiated_version: When the platform supports a newer protocol version,
 *			the agent will try to negotiate with the platform the
 *			usage of the newest version known to it, since
 *			backward compatibility is NOT automatically assured.
 *			This field is NON-zero when a successful negotiation
 *			has completed.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	unsigned int			version;
	unsigned int			negotiated_version;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_debug_info - Debug common info
 * @top_dentry: A reference to the top debugfs dentry
 * @name: Name of this SCMI instance
 * @type: Type of this SCMI instance
 * @is_atomic: Flag to state if the transport of this instance is atomic
 * @counters: An array of atomic_t's used for tracking statistics (if enabled)
 */
struct scmi_debug_info {
	struct dentry *top_dentry;
	const char *name;
	const char *type;
	bool is_atomic;
	atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @id: A sequence number starting from zero identifying this instance
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	     implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *		   scmi_revision_info.num_protocols elements allocated by the
 *		   base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
 *		bus
 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
 * @dbg: A pointer to debugfs related data (if any)
 * @raw: An opaque reference handle used by SCMI Raw mode.
 */
struct scmi_info {
	int id;
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
	struct notifier_block bus_nb;
	struct notifier_block dev_req_nb;
	/* Serialize device creation process for this instance */
	struct mutex devreq_mtx;
	struct scmi_debug_info *dbg;
	void *raw;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)

static void scmi_rx_callback(struct scmi_chan_info *cinfo,
			     u32 msg_hdr, void *priv);
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
				   u32 msg_hdr, enum scmi_bad_msg err);

static struct scmi_transport_core_operations scmi_trans_core_ops = {
	.bad_message_trace = scmi_bad_message_trace,
	.rx_callback = scmi_rx_callback,
};

static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
			       char *sub_vendor_id, u32 impl_ver)
{
	char *signature, *p;
	unsigned long hash = 0;

	/* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
	signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
			      vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
	if (!signature)
		return 0;

	p = signature;
	while (*p)
		hash = partial_name_hash(tolower(*p++), hash);
	hash = end_name_hash(hash);

	kfree(signature);

	return hash;
}
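
/*
 * As an illustrative sketch (the values are hypothetical, not taken from any
 * real vendor), the signature string hashed above for protocol_id 0x81,
 * vendor_id "ACME", sub_vendor_id NULL and impl_ver 0x00010000 would be:
 *
 *	"81|ACME||0x00010000"
 *
 * built from the "%02X|%s|%s|0x%08X" format, with NULL ids rendered as empty
 * strings by the ?: fallbacks above.
 */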

static unsigned long
scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		return protocol_id;
	else
		return scmi_vendor_protocol_signature(protocol_id, vendor_id,
						      sub_vendor_id, impl_ver);
}

static const struct scmi_protocol *
__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			      char *sub_vendor_id, u32 impl_ver)
{
	unsigned long key;
	struct scmi_protocol *proto = NULL;

	key = scmi_protocol_key_calculate(protocol_id, vendor_id,
					  sub_vendor_id, impl_ver);
	if (key)
		proto = xa_load(&scmi_protocols, key);

	return proto;
}

static const struct scmi_protocol *
scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	const struct scmi_protocol *proto = NULL;

	/* Searching for closest match ... */
	proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
					      sub_vendor_id, impl_ver);
	if (proto)
		return proto;

	/* Any match just on vendor/sub_vendor ? */
	if (impl_ver) {
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      sub_vendor_id, 0);
		if (proto)
			return proto;
	}

	/* Any match just on the vendor ? */
	if (sub_vendor_id)
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      NULL, 0);
	return proto;
}

static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
	const struct scmi_protocol *proto = NULL;

	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		proto = xa_load(&scmi_protocols, protocol_id);
	else
		proto = scmi_vendor_protocol_lookup(protocol_id,
						    version->vendor_id,
						    version->sub_vendor_id,
						    version->impl_ver);
	if (!proto || !try_module_get(proto->owner)) {
		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
		return NULL;
	}

	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);

	if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
		pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
			protocol_id, proto->vendor_id ?: "",
			proto->sub_vendor_id ?: "", proto->impl_ver);

	return proto;
}

static void scmi_protocol_put(const struct scmi_protocol *proto)
{
	if (proto)
		module_put(proto->owner);
}

static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
{
	if (!proto->vendor_id) {
		pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->sub_vendor_id &&
	    strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed sub_vendor_id for protocol 0x%x\n",
		       proto->id);
		return -EINVAL;
	}

	return 0;
}

int scmi_protocol_register(const struct scmi_protocol *proto)
{
	int ret;
	unsigned long key;

	if (!proto) {
		pr_err("invalid protocol\n");
		return -EINVAL;
	}

	if (!proto->instance_init) {
		pr_err("missing init for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
	    scmi_vendor_protocol_check(proto))
		return -EINVAL;

	/*
	 * Calculate a protocol key to register this protocol with the core;
	 * key value 0 is considered invalid.
	 */
	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return -EINVAL;

	ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
	if (ret) {
		pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
		       proto->id, ret);
		return ret;
	}

	pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);

	return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
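
/*
 * Illustrative sketch of a caller: a hypothetical vendor protocol module
 * would fill in a struct scmi_protocol and register it at module init time,
 * pairing it with scmi_protocol_unregister() on exit. All names and values
 * below are made up for the example (0x81 is assumed to lie in the vendor
 * range, i.e. >= SCMI_PROTOCOL_VENDOR_BASE); only the fields checked above
 * are shown.
 *
 *	static const struct scmi_protocol scmi_acme_proto = {
 *		.id = 0x81,
 *		.owner = THIS_MODULE,
 *		.instance_init = &scmi_acme_proto_init,
 *		.vendor_id = "ACME",
 *		.impl_ver = 0x00010000,
 *	};
 *
 *	ret = scmi_protocol_register(&scmi_acme_proto);
 */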

void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
	unsigned long key;

	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return;

	xa_erase(&scmi_protocols, key);

	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 * @name: The optional name of the device to be created: if not provided this
 *	  call will lead to the creation of all the devices currently requested
 *	  for the specified protocol.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info,
					 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	mutex_lock(&info->devreq_mtx);
	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (name && !sdev)
		dev_err(info->dev,
			"failed to create device for protocol 0x%X (%s)\n",
			prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

static void scmi_destroy_protocol_devices(struct scmi_info *info,
					  int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_destroy(info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data are visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our requests in order, we must
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may not be available, so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are in-flight, but there
 *    are plenty of free tokens at the start, so try a second pass using
 *    find_next_zero_bit() starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
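
/*
 * Worked example (hypothetical numbers, assuming the usual 10-bit SCMI token
 * space, i.e. MSG_TOKEN_MAX == 1024): with transfer_id == 1030 the candidate
 * is next_token = 1030 & 1023 = 6; if bit 6 is already in-flight and bit 9 is
 * the first free one, xfer_id = 9 is picked and transfer_last_id is advanced
 * by 3 so that subsequent candidates stay monotonic.
 */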

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * had been built using an xfer sequence number which still corresponds to a
 * free slot in the xfer_alloc_table.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 */
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
				     struct scmi_xfers_info *minfo)
{
	/* Set in-flight */
	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
	xfer->pending = true;
}

/**
 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper does NOT assume anything about the sequence number
 * that was baked into the provided xfer, so it checks at first if it can
 * be mapped to a free slot and fails with an error if another xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 *	   could not be mapped to a free slot in the xfer_alloc_table.
 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
 * flight on the TX channel, if possible.
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: The xfer to register
 *
 * Return: 0 on Success, error otherwise
 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}

/**
 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @xfer: The xfer to act upon
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Return: 0 on Success or error otherwise
 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
					struct scmi_xfers_info *minfo)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Set a new monotonic token as the xfer sequence number */
	ret = scmi_xfer_token_set(minfo, xfer);
	if (!ret)
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, still no sequence number is assigned to the
 * allocated xfer, nor is it registered as a pending transaction.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else an error pointer.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Note that xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer = scmi_xfer_get(handle, &info->tx_minfo);
	if (!IS_ERR(xfer))
		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

	return xfer;
}

/**
 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
 * to use for a specific protocol_id Raw transaction.
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol_id: Identifier of the protocol
 *
 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
 * the DT to have an associated channel and be usable; but in Raw mode any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 *
 * Return: A reference to the channel to use, or an ERR_PTR
 */
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_chan_info *cinfo;
	struct scmi_info *info = handle_to_scmi_info(handle);

	cinfo = idr_find(&info->tx_idr, protocol_id);
	if (!cinfo) {
		if (protocol_id == SCMI_PROTOCOL_BASE)
			return ERR_PTR(-EINVAL);
		/* Use Base channel for protocols not defined for DT */
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		if (!cinfo)
			return ERR_PTR(-EINVAL);
		dev_warn_once(handle->dev,
			      "Using Base channel for protocol 0x%X\n",
			      protocol_id);
	}

	return cinfo;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: A reference to the xfer to put
 *
 * Note that as with other xfer_put() handlers the xfer is effectively
 * released only once there are no more users on the system.
 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
	xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
	return __scmi_xfer_put(&info->tx_minfo, xfer);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_bad_message_trace - A helper to trace weird messages
 *
 * @cinfo: A reference to the channel descriptor on which the message was
 *	   received
 * @msg_hdr: Message header to track
 * @err: A specific error code used as a status value in traces.
 *
 * This helper can be used to trace any kind of weird, incomplete, unexpected,
 * timed-out message that arrives and as such, can be traced only referring to
 * the header content, since the payload is missing/unreliable.
 */
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
				   enum scmi_bad_msg err)
{
	char *tag;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	switch (MSG_XTRACT_TYPE(msg_hdr)) {
	case MSG_TYPE_COMMAND:
		tag = "!RESP";
		break;
	case MSG_TYPE_DELAYED_RESP:
		tag = "!DLYD";
		break;
	case MSG_TYPE_NOTIFICATION:
		tag = "!NOTI";
		break;
	default:
		tag = "!UNKN";
		break;
	}

	trace_scmi_msg_dump(info->id, cinfo->id,
			    MSG_XTRACT_PROT_ID(msg_hdr),
			    MSG_XTRACT_ID(msg_hdr), tag,
			    MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}
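
/*
 * For reference, the valid xfer state transitions enforced above can be
 * sketched as (a reading aid, not code):
 *
 *	SENT_OK --[RESP]--> RESP_OK --[DRESP]--> DRESP_OK
 *	SENT_OK --[DRESP]--------------------->  DRESP_OK
 *		(OoO: RESP assumed OK and completed on the way)
 *
 * with any other combination rejected as -EINVAL.
 */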

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
		scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);

		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
		scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);

		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (!cinfo->is_p2a) {
		dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
		return;
	}

	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
		scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);

		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
	scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
					cinfo->id);
	}

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);

		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id,
			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
			    xfer->hdr.seq, xfer->hdr.status,
			    xfer->rx.buf, xfer->rx.len);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
		scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
	} else {
		complete(&xfer->done);
		scmi_inc_count(info->dbg->counters, RESPONSE_OK);
	}

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		/*
		 * When in polling mode avoid queueing the Raw xfer on the IRQ
		 * RX path since it will be already queued at the end of the TX
		 * poll loop.
		 */
		if (!xfer->hdr.poll_completion)
			scmi_raw_message_report(info->raw, xfer,
						SCMI_RAW_REPLY_QUEUE,
						cinfo->id);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
			     void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
		break;
	}
}

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous commands replies.
		 */
		if (!desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
				scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
			}
		}

		if (!ret) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);

			/* Trace polled replies. */
			trace_scmi_msg_dump(info->id, cinfo->id,
					    xfer->hdr.protocol_id, xfer->hdr.id,
					    !SCMI_XFER_IS_RAW(xfer) ?
					    "RESP" : "resp",
					    xfer->hdr.seq, xfer->hdr.status,
					    xfer->rx.buf, xfer->rx.len);
			scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);

			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
				scmi_raw_message_report(info->raw, xfer,
							SCMI_RAW_REPLY_QUEUE,
							cinfo->id);
			}
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
			scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
		}
	}

	return ret;
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      info->desc->max_rx_timeout_ms,
				      xfer->hdr.poll_completion);

	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
				   info->desc->max_rx_timeout_ms);
}

/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * reply to an xfer raw request on a specific channel for the required timeout.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 * @timeout_ms: The maximum timeout in milliseconds
 *
 * Return: 0 on Success, error otherwise.
 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms)
{
	int ret;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
	if (ret)
		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
			pack_scmi_header(&xfer->hdr));

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, the corresponding error in
 *	   case of transmit error, else 0 if all goes well.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion &&
	    !is_transport_polling_capable(info->desc)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo)) {
		scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
		return -EINVAL;
	}
	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info->desc))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code meddling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	/* Clear any stale status */
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		scmi_inc_count(info->dbg->counters, SENT_FAIL);
		return ret;
	}

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "CMND", xfer->hdr.seq,
			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
	scmi_inc_count(info->dbg->counters, SENT_OK);

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status) {
		ret = scmi_to_linux_errno(xfer->hdr.status);
		scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
	}

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT in case of no delayed response, the corresponding error
 *	   in case of transmit error, else 0 if all goes well.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
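
/*
 * Illustrative sketch (MY_CMD_ID, my_arg and my_result are hypothetical): a
 * protocol implementation drives a synchronous command through the helpers
 * above via the ops exported in the xfer_ops table further below, typically
 * along these lines:
 *
 *	struct scmi_xfer *t;
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_CMD_ID, sizeof(__le32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(my_arg, t->tx.buf);
 *	ret = ph->xops->do_xfer(ph, t);	(or do_xfer_with_response() for async)
 *	if (!ret)
 *		my_result = get_unaligned_le32(t->rx.buf);
 *
 *	ph->xops->xfer_put(ph, t);
 *
 * version_get() below is a real instance of exactly this pattern.
 */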

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	   corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	/* Pick a sequence number and register this xfer as in-flight */
	ret = scmi_xfer_pending_set(xfer, minfo);
	if (ret) {
		dev_err(pi->handle->dev,
			"Failed to get monotonic token %d\n", ret);
		__scmi_xfer_put(minfo, xfer);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}
1594
1595 /**
1596 * scmi_set_protocol_priv - Set protocol specific data at init time
1597 *
1598 * @ph: A reference to the protocol handle.
1599 * @priv: The private data to set.
1600 * @version: The detected protocol version for the core to register.
1601 *
1602 * Return: 0 on Success
1603 */
1604 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1605 void *priv, u32 version)
1606 {
1607 struct scmi_protocol_instance *pi = ph_to_pi(ph);
1608
1609 pi->priv = priv;
1610 pi->version = version;
1611
1612 return 0;
1613 }
1614
1615 /**
1616 * scmi_get_protocol_priv - Get protocol specific data set at init time
1617 *
1618 * @ph: A reference to the protocol handle.
1619 *
1620 * Return: Protocol private data if any was set.
1621 */
1622 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1623 {
1624 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1625
1626 return pi->priv;
1627 }
1628
1629 static const struct scmi_xfer_ops xfer_ops = {
1630 .version_get = version_get,
1631 .xfer_get_init = xfer_get_init,
1632 .reset_rx_to_maxsz = reset_rx_to_maxsz,
1633 .do_xfer = do_xfer,
1634 .do_xfer_with_response = do_xfer_with_response,
1635 .xfer_put = xfer_put,
1636 };
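
/*
 * Example: a minimal sketch of a protocol command built on the xfer_ops
 * table above, mirroring version_get() but going through the exported
 * operations the way an out-of-file protocol implementation would.
 * PROTOCOL_ATTRIBUTES is the generic SCMI attributes command; the helper
 * name and the single-word reply layout are illustrative assumptions.
 */
static int example_protocol_attributes_get(const struct scmi_protocol_handle *ph,
					   u32 *attributes)
{
	int ret;
	struct scmi_xfer *t;

	/* No TX payload, expect one LE32 word back */
	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(__le32), &t);
	if (ret)
		return ret;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*attributes = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}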
1637
1638 struct scmi_msg_resp_domain_name_get {
1639 __le32 flags;
1640 u8 name[SCMI_MAX_STR_SIZE];
1641 };
1642
1643 /**
1644 * scmi_common_extended_name_get - Common helper to get extended resources name
1645 * @ph: A protocol handle reference.
1646 * @cmd_id: The specific command ID to use.
1647 * @res_id: The specific resource ID to use.
1648 * @flags: A pointer to specific flags to use, if any.
1649 * @name: A pointer to the preallocated area where the retrieved name will be
1650 * stored as a NULL terminated string.
1651 * @len: The length in bytes of the @name char array.
1652 *
1653 * Return: 0 on Success
1654 */
1655 static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1656 u8 cmd_id, u32 res_id, u32 *flags,
1657 char *name, size_t len)
1658 {
1659 int ret;
1660 size_t txlen;
1661 struct scmi_xfer *t;
1662 struct scmi_msg_resp_domain_name_get *resp;
1663
1664 txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
1665 ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1666 if (ret)
1667 goto out;
1668
1669 put_unaligned_le32(res_id, t->tx.buf);
1670 if (flags)
1671 put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1672 resp = t->rx.buf;
1673
1674 ret = ph->xops->do_xfer(ph, t);
1675 if (!ret)
1676 strscpy(name, resp->name, len);
1677
1678 ph->xops->xfer_put(ph, t);
1679 out:
1680 if (ret)
1681 dev_warn(ph->dev,
1682 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
1683 res_id, ret, name);
1684 return ret;
1685 }
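
/*
 * Example: a hedged sketch of the intended calling convention for the
 * helper above: @name is pre-filled with the short name obtained from the
 * regular attributes reply, so the "Using %s" warning on failure reports
 * a meaningful fallback. EXAMPLE_NAME_GET and @short_name are hypothetical.
 */
#define EXAMPLE_NAME_GET	0xC	/* hypothetical message ID */

static void example_name_get(const struct scmi_protocol_handle *ph,
			     u32 domain, const char *short_name,
			     char *name, size_t len)
{
	/* Keep the short name as a fallback if the extended query fails */
	strscpy(name, short_name, len);
	ph->hops->extended_name_get(ph, EXAMPLE_NAME_GET, domain, NULL,
				    name, len);
}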
1686
1687 /**
1688 * scmi_common_get_max_msg_size - Get maximum message size
1689 * @ph: A protocol handle reference.
1690 *
1691 * Return: Maximum message size for the current protocol.
1692 */
1693 static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
1694 {
1695 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1696 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1697
1698 return info->desc->max_msg_size;
1699 }
1700
1701 /**
1702 * struct scmi_iterator - Iterator descriptor
1703 * @msg: A reference to the message TX buffer; filled by @prepare_message with
1704 * a proper custom command payload for each multi-part command request.
1705 * @resp: A reference to the response RX buffer; used by @update_state and
1706 * @process_response to parse the multi-part replies.
1707 * @t: A reference to the underlying xfer initialized and used transparently by
1708 * the iterator internal routines.
1709 * @ph: A reference to the associated protocol handle to be used.
1710 * @ops: A reference to the custom provided iterator operations.
1711 * @state: The current iterator state; used and updated in turn by the iterator's
1712 * internal routines and by the caller-provided @scmi_iterator_ops.
1713 * @priv: A reference to optional private data as provided by the caller and
1714 * passed back to the @scmi_iterator_ops.
1715 */
1716 struct scmi_iterator {
1717 void *msg;
1718 void *resp;
1719 struct scmi_xfer *t;
1720 const struct scmi_protocol_handle *ph;
1721 struct scmi_iterator_ops *ops;
1722 struct scmi_iterator_state state;
1723 void *priv;
1724 };
1725
1726 static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1727 struct scmi_iterator_ops *ops,
1728 unsigned int max_resources, u8 msg_id,
1729 size_t tx_size, void *priv)
1730 {
1731 int ret;
1732 struct scmi_iterator *i;
1733
1734 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1735 if (!i)
1736 return ERR_PTR(-ENOMEM);
1737
1738 i->ph = ph;
1739 i->ops = ops;
1740 i->priv = priv;
1741
1742 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1743 if (ret) {
1744 devm_kfree(ph->dev, i);
1745 return ERR_PTR(ret);
1746 }
1747
1748 i->state.max_resources = max_resources;
1749 i->msg = i->t->tx.buf;
1750 i->resp = i->t->rx.buf;
1751
1752 return i;
1753 }
1754
1755 static int scmi_iterator_run(void *iter)
1756 {
1757 int ret = -EINVAL;
1758 struct scmi_iterator_ops *iops;
1759 const struct scmi_protocol_handle *ph;
1760 struct scmi_iterator_state *st;
1761 struct scmi_iterator *i = iter;
1762
1763 if (!i || !i->ops || !i->ph)
1764 return ret;
1765
1766 iops = i->ops;
1767 ph = i->ph;
1768 st = &i->state;
1769
1770 do {
1771 iops->prepare_message(i->msg, st->desc_index, i->priv);
1772 ret = ph->xops->do_xfer(ph, i->t);
1773 if (ret)
1774 break;
1775
1776 st->rx_len = i->t->rx.len;
1777 ret = iops->update_state(st, i->resp, i->priv);
1778 if (ret)
1779 break;
1780
1781 if (st->num_returned > st->max_resources - st->desc_index) {
1782 dev_err(ph->dev,
1783 "No. of resources can't exceed %d\n",
1784 st->max_resources);
1785 ret = -EINVAL;
1786 break;
1787 }
1788
1789 for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1790 st->loop_idx++) {
1791 ret = iops->process_response(ph, i->resp, st, i->priv);
1792 if (ret)
1793 goto out;
1794 }
1795
1796 st->desc_index += st->num_returned;
1797 ph->xops->reset_rx_to_maxsz(ph, i->t);
1798 /*
1799 * check for both returned and remaining to avoid infinite
1800 * loop due to buggy firmware
1801 */
1802 } while (st->num_returned && st->num_remaining);
1803
1804 out:
1805 /* Finalize and destroy iterator */
1806 ph->xops->xfer_put(ph, i->t);
1807 devm_kfree(ph->dev, i);
1808
1809 return ret;
1810 }
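
/*
 * Example: a hedged sketch of a multi-part query built on the iterator
 * helpers above. The EXAMPLE_LIST_CMD ID, the message/response layouts
 * and the num_items bit-packing are invented for illustration; only the
 * scmi_iterator_ops flow matches the machinery in scmi_iterator_run().
 */
#define EXAMPLE_LIST_CMD	0x8	/* hypothetical message ID */

struct example_list_msg {
	__le32 desc_index;
};

struct example_list_resp {
	__le32 num_items;	/* hypothetical: returned[11:0] remaining[31:16] */
	__le32 item[];
};

static void iter_example_prepare_message(void *message,
					 unsigned int desc_index,
					 const void *priv)
{
	struct example_list_msg *msg = message;

	/* Ask for the next chunk starting at the current index */
	msg->desc_index = cpu_to_le32(desc_index);
}

static int iter_example_update_state(struct scmi_iterator_state *st,
				     const void *response, void *priv)
{
	const struct example_list_resp *r = response;

	st->num_returned = le32_to_cpu(r->num_items) & 0xfff;
	st->num_remaining = le32_to_cpu(r->num_items) >> 16;

	return 0;
}

static int iter_example_process_response(const struct scmi_protocol_handle *ph,
					 const void *response,
					 struct scmi_iterator_state *st,
					 void *priv)
{
	const struct example_list_resp *r = response;
	u32 *out = priv;

	/* Bounds already enforced against max_resources by the core loop */
	out[st->desc_index + st->loop_idx] = le32_to_cpu(r->item[st->loop_idx]);

	return 0;
}

static int example_list_all(const struct scmi_protocol_handle *ph,
			    unsigned int max_items, u32 *out)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_example_prepare_message,
		.update_state = iter_example_update_state,
		.process_response = iter_example_process_response,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, max_items,
					    EXAMPLE_LIST_CMD,
					    sizeof(struct example_list_msg),
					    out);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	/* Runs the whole multi-part exchange and releases the iterator */
	return ph->hops->iter_response_run(iter);
}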
1811
1812 struct scmi_msg_get_fc_info {
1813 __le32 domain;
1814 __le32 message_id;
1815 };
1816
1817 struct scmi_msg_resp_desc_fc {
1818 __le32 attr;
1819 #define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1820 #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1821 __le32 rate_limit;
1822 __le32 chan_addr_low;
1823 __le32 chan_addr_high;
1824 __le32 chan_size;
1825 __le32 db_addr_low;
1826 __le32 db_addr_high;
1827 __le32 db_set_lmask;
1828 __le32 db_set_hmask;
1829 __le32 db_preserve_lmask;
1830 __le32 db_preserve_hmask;
1831 };
1832
1833 static void
1834 scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1835 u8 describe_id, u32 message_id, u32 valid_size,
1836 u32 domain, void __iomem **p_addr,
1837 struct scmi_fc_db_info **p_db, u32 *rate_limit)
1838 {
1839 int ret;
1840 u32 flags;
1841 u64 phys_addr;
1842 u8 size;
1843 void __iomem *addr;
1844 struct scmi_xfer *t;
1845 struct scmi_fc_db_info *db = NULL;
1846 struct scmi_msg_get_fc_info *info;
1847 struct scmi_msg_resp_desc_fc *resp;
1848 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1849
1850 if (!p_addr) {
1851 ret = -EINVAL;
1852 goto err_out;
1853 }
1854
1855 ret = ph->xops->xfer_get_init(ph, describe_id,
1856 sizeof(*info), sizeof(*resp), &t);
1857 if (ret)
1858 goto err_out;
1859
1860 info = t->tx.buf;
1861 info->domain = cpu_to_le32(domain);
1862 info->message_id = cpu_to_le32(message_id);
1863
1864 /*
1865 * Bail out on error leaving fc_info addresses zeroed; this includes
1866 * the case in which the requested domain/message_id does NOT support
1867 * fastchannels at all.
1868 */
1869 ret = ph->xops->do_xfer(ph, t);
1870 if (ret)
1871 goto err_xfer;
1872
1873 resp = t->rx.buf;
1874 flags = le32_to_cpu(resp->attr);
1875 size = le32_to_cpu(resp->chan_size);
1876 if (size != valid_size) {
1877 ret = -EINVAL;
1878 goto err_xfer;
1879 }
1880
1881 if (rate_limit)
1882 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1883
1884 phys_addr = le32_to_cpu(resp->chan_addr_low);
1885 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1886 addr = devm_ioremap(ph->dev, phys_addr, size);
1887 if (!addr) {
1888 ret = -EADDRNOTAVAIL;
1889 goto err_xfer;
1890 }
1891
1892 *p_addr = addr;
1893
1894 if (p_db && SUPPORTS_DOORBELL(flags)) {
1895 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1896 if (!db) {
1897 ret = -ENOMEM;
1898 goto err_db;
1899 }
1900
1901 size = 1 << DOORBELL_REG_WIDTH(flags);
1902 phys_addr = le32_to_cpu(resp->db_addr_low);
1903 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1904 addr = devm_ioremap(ph->dev, phys_addr, size);
1905 if (!addr) {
1906 ret = -EADDRNOTAVAIL;
1907 goto err_db_mem;
1908 }
1909
1910 db->addr = addr;
1911 db->width = size;
1912 db->set = le32_to_cpu(resp->db_set_lmask);
1913 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1914 db->mask = le32_to_cpu(resp->db_preserve_lmask);
1915 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1916
1917 *p_db = db;
1918 }
1919
1920 ph->xops->xfer_put(ph, t);
1921
1922 dev_dbg(ph->dev,
1923 "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
1924 pi->proto->id, message_id, domain);
1925
1926 return;
1927
1928 err_db_mem:
1929 devm_kfree(ph->dev, db);
1930
1931 err_db:
1932 *p_addr = NULL;
1933
1934 err_xfer:
1935 ph->xops->xfer_put(ph, t);
1936
1937 err_out:
1938 dev_warn(ph->dev,
1939 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
1940 pi->proto->id, message_id, domain, ret);
1941 }
1942
1943 #define SCMI_PROTO_FC_RING_DB(w) \
1944 do { \
1945 u##w val = 0; \
1946 \
1947 if (db->mask) \
1948 val = ioread##w(db->addr) & db->mask; \
1949 iowrite##w((u##w)db->set | val, db->addr); \
1950 } while (0)
1951
1952 static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
1953 {
1954 if (!db || !db->addr)
1955 return;
1956
1957 if (db->width == 1)
1958 SCMI_PROTO_FC_RING_DB(8);
1959 else if (db->width == 2)
1960 SCMI_PROTO_FC_RING_DB(16);
1961 else if (db->width == 4)
1962 SCMI_PROTO_FC_RING_DB(32);
1963 else /* db->width == 8 */
1964 #ifdef CONFIG_64BIT
1965 SCMI_PROTO_FC_RING_DB(64);
1966 #else
1967 {
1968 u64 val = 0;
1969
1970 if (db->mask)
1971 val = ioread64_hi_lo(db->addr) & db->mask;
1972 iowrite64_hi_lo(db->set | val, db->addr);
1973 }
1974 #endif
1975 }
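
/*
 * Example: a hedged sketch of a protocol adopting the two fastchannel
 * helpers above for a hypothetical 4-byte "level set" register.
 * EXAMPLE_DESCRIBE_FC and EXAMPLE_LEVEL_SET are placeholder message IDs.
 */
#define EXAMPLE_DESCRIBE_FC	0x9	/* hypothetical message ID */
#define EXAMPLE_LEVEL_SET	0x5	/* hypothetical message ID */

struct example_fc_info {
	void __iomem *level_set_addr;
	struct scmi_fc_db_info *level_set_db;
	u32 rate_limit;
};

static void example_fc_init(const struct scmi_protocol_handle *ph,
			    u32 domain, struct example_fc_info *fc)
{
	/* On any failure the address stays NULL and messaging is used */
	ph->hops->fastchannel_init(ph, EXAMPLE_DESCRIBE_FC, EXAMPLE_LEVEL_SET,
				   sizeof(u32), domain, &fc->level_set_addr,
				   &fc->level_set_db, &fc->rate_limit);
}

static void example_fc_level_set(const struct scmi_protocol_handle *ph,
				 struct example_fc_info *fc, u32 level)
{
	if (!fc->level_set_addr)
		return;	/* No FC advertised: fall back to regular commands */

	iowrite32(level, fc->level_set_addr);
	/* A NULL doorbell descriptor makes the ring below a no-op */
	ph->hops->fastchannel_db_ring(fc->level_set_db);
}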
1976
1977 /**
1978 * scmi_protocol_msg_check - Check protocol message attributes
1979 *
1980 * @ph: A reference to the protocol handle.
1981 * @message_id: The ID of the message to check.
1982 * @attributes: A parameter to optionally return the retrieved message
1983 * attributes, in case of Success.
1984 *
1985 * A helper to check protocol message attributes for a specific protocol
1986 * and message pair.
1987 *
1988 * Return: 0 on SUCCESS
1989 */
1990 static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1991 u32 message_id, u32 *attributes)
1992 {
1993 int ret;
1994 struct scmi_xfer *t;
1995
1996 ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1997 sizeof(__le32), 0, &t);
1998 if (ret)
1999 return ret;
2000
2001 put_unaligned_le32(message_id, t->tx.buf);
2002 ret = do_xfer(ph, t);
2003 if (!ret && attributes)
2004 *attributes = get_unaligned_le32(t->rx.buf);
2005 xfer_put(ph, t);
2006
2007 return ret;
2008 }
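
/*
 * Example: a short sketch of the intended use of the helper above, i.e.
 * probing whether an optional command is implemented before advertising
 * the related feature. EXAMPLE_OPTIONAL_CMD is a placeholder.
 */
#define EXAMPLE_OPTIONAL_CMD	0xB	/* hypothetical message ID */

static bool example_cmd_is_supported(const struct scmi_protocol_handle *ph)
{
	u32 attributes;

	/* A non-zero return simply means the command is not implemented */
	return !ph->hops->protocol_msg_check(ph, EXAMPLE_OPTIONAL_CMD,
					     &attributes);
}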
2009
2010 static const struct scmi_proto_helpers_ops helpers_ops = {
2011 .extended_name_get = scmi_common_extended_name_get,
2012 .get_max_msg_size = scmi_common_get_max_msg_size,
2013 .iter_response_init = scmi_iterator_init,
2014 .iter_response_run = scmi_iterator_run,
2015 .protocol_msg_check = scmi_protocol_msg_check,
2016 .fastchannel_init = scmi_common_fastchannel_init,
2017 .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
2018 };
2019
2020 /**
2021 * scmi_revision_area_get - Retrieve version memory area.
2022 *
2023 * @ph: A reference to the protocol handle.
2024 *
2025 * A helper to grab the version memory area reference during SCMI Base protocol
2026 * initialization.
2027 *
2028 * Return: A reference to the version memory area associated to the SCMI
2029 * instance underlying this protocol handle.
2030 */
2031 struct scmi_revision_info *
2032 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
2033 {
2034 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2035
2036 return pi->handle->version;
2037 }
2038
2039 /**
2040 * scmi_protocol_version_negotiate - Negotiate protocol version
2041 *
2042 * @ph: A reference to the protocol handle.
2043 *
2044 * A helper to negotiate a protocol version different from the latest
2045 * advertised as supported from the platform: on Success backward
2046 * compatibility is assured by the platform.
2047 *
2048 * Return: 0 on Success
2049 */
2050 static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
2051 {
2052 int ret;
2053 struct scmi_xfer *t;
2054 struct scmi_protocol_instance *pi = ph_to_pi(ph);
2055
2056 /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
2057 ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
2058 if (ret)
2059 return ret;
2060
2061 /* ... then attempt protocol version negotiation */
2062 ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
2063 sizeof(__le32), 0, &t);
2064 if (ret)
2065 return ret;
2066
2067 put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2068 ret = do_xfer(ph, t);
2069 if (!ret)
2070 pi->negotiated_version = pi->proto->supported_version;
2071
2072 xfer_put(ph, t);
2073
2074 return ret;
2075 }
2076
2077 /**
2078 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2079 * instance descriptor.
2080 * @info: The reference to the related SCMI instance.
2081 * @proto: The protocol descriptor.
2082 *
2083 * Allocate a new protocol instance descriptor, using the provided @proto
2084 * description, against the specified SCMI instance @info, and initialize it;
2085 * all resources management is handled via a dedicated per-protocol devres
2086 * group.
2087 *
2088 * Context: Assumes to be called with @protocols_mtx already acquired.
2089 * Return: A reference to a freshly allocated and initialized protocol instance
2090 * or ERR_PTR on failure. On failure the @proto reference is at first
2091 * put using @scmi_protocol_put() before releasing all the devres group.
2092 */
2093 static struct scmi_protocol_instance *
2094 scmi_alloc_init_protocol_instance(struct scmi_info *info,
2095 const struct scmi_protocol *proto)
2096 {
2097 int ret = -ENOMEM;
2098 void *gid;
2099 struct scmi_protocol_instance *pi;
2100 const struct scmi_handle *handle = &info->handle;
2101
2102 /* Protocol specific devres group */
2103 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2104 if (!gid) {
2105 scmi_protocol_put(proto);
2106 goto out;
2107 }
2108
2109 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2110 if (!pi)
2111 goto clean;
2112
2113 pi->gid = gid;
2114 pi->proto = proto;
2115 pi->handle = handle;
2116 pi->ph.dev = handle->dev;
2117 pi->ph.xops = &xfer_ops;
2118 pi->ph.hops = &helpers_ops;
2119 pi->ph.set_priv = scmi_set_protocol_priv;
2120 pi->ph.get_priv = scmi_get_protocol_priv;
2121 refcount_set(&pi->users, 1);
2122 /* proto->init is assured NON NULL by scmi_protocol_register */
2123 ret = pi->proto->instance_init(&pi->ph);
2124 if (ret)
2125 goto clean;
2126
2127 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2128 GFP_KERNEL);
2129 if (ret != proto->id)
2130 goto clean;
2131
2132 /*
2133 * Warn but ignore events registration errors since we do not want
2134 * to skip whole protocols if their notifications are messed up.
2135 */
2136 if (pi->proto->events) {
2137 ret = scmi_register_protocol_events(handle, pi->proto->id,
2138 &pi->ph,
2139 pi->proto->events);
2140 if (ret)
2141 dev_warn(handle->dev,
2142 "Protocol:%X - Events Registration Failed - err:%d\n",
2143 pi->proto->id, ret);
2144 }
2145
2146 devres_close_group(handle->dev, pi->gid);
2147 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2148
2149 if (pi->version > proto->supported_version) {
2150 ret = scmi_protocol_version_negotiate(&pi->ph);
2151 if (!ret) {
2152 dev_info(handle->dev,
2153 "Protocol 0x%X successfully negotiated version 0x%X\n",
2154 proto->id, pi->negotiated_version);
2155 } else {
2156 dev_warn(handle->dev,
2157 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
2158 pi->version, pi->proto->id);
2159 dev_warn(handle->dev,
2160 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
2161 pi->proto->supported_version);
2162 }
2163 }
2164
2165 return pi;
2166
2167 clean:
2168 /* Take care to put the protocol module's owner before releasing all */
2169 scmi_protocol_put(proto);
2170 devres_release_group(handle->dev, gid);
2171 out:
2172 return ERR_PTR(ret);
2173 }
2174
2175 /**
2176 * scmi_get_protocol_instance - Protocol initialization helper.
2177 * @handle: A reference to the SCMI platform instance.
2178 * @protocol_id: The protocol being requested.
2179 *
2180 * In case the required protocol has never been requested before for this
2181 * instance, allocate and initialize all the needed structures while handling
2182 * resource allocation with a dedicated per-protocol devres subgroup.
2183 *
2184 * Return: A reference to an initialized protocol instance or error on failure:
2185 * in particular returns -EPROBE_DEFER when the desired protocol could
2186 * NOT be found.
2187 */
2188 static struct scmi_protocol_instance * __must_check
2189 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
2190 {
2191 struct scmi_protocol_instance *pi;
2192 struct scmi_info *info = handle_to_scmi_info(handle);
2193
2194 mutex_lock(&info->protocols_mtx);
2195 pi = idr_find(&info->protocols, protocol_id);
2196
2197 if (pi) {
2198 refcount_inc(&pi->users);
2199 } else {
2200 const struct scmi_protocol *proto;
2201
2202 /* Fails if protocol not registered on bus */
2203 proto = scmi_protocol_get(protocol_id, &info->version);
2204 if (proto)
2205 pi = scmi_alloc_init_protocol_instance(info, proto);
2206 else
2207 pi = ERR_PTR(-EPROBE_DEFER);
2208 }
2209 mutex_unlock(&info->protocols_mtx);
2210
2211 return pi;
2212 }
2213
2214 /**
2215 * scmi_protocol_acquire - Protocol acquire
2216 * @handle: A reference to the SCMI platform instance.
2217 * @protocol_id: The protocol being requested.
2218 *
2219 * Register a new user for the requested protocol on the specified SCMI
2220 * platform instance, possibly triggering its initialization on first user.
2221 *
2222 * Return: 0 if protocol was acquired successfully.
2223 */
2224 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2225 {
2226 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2227 }
2228
2229 /**
2230 * scmi_protocol_release - Protocol de-initialization helper.
2231 * @handle: A reference to the SCMI platform instance.
2232 * @protocol_id: The protocol being requested.
2233 *
2234 * Remove one user of the specified protocol and trigger de-initialization
2235 * and resource de-allocation once the last user has gone.
2236 */
2237 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2238 {
2239 struct scmi_info *info = handle_to_scmi_info(handle);
2240 struct scmi_protocol_instance *pi;
2241
2242 mutex_lock(&info->protocols_mtx);
2243 pi = idr_find(&info->protocols, protocol_id);
2244 if (WARN_ON(!pi))
2245 goto out;
2246
2247 if (refcount_dec_and_test(&pi->users)) {
2248 void *gid = pi->gid;
2249
2250 if (pi->proto->events)
2251 scmi_deregister_protocol_events(handle, protocol_id);
2252
2253 if (pi->proto->instance_deinit)
2254 pi->proto->instance_deinit(&pi->ph);
2255
2256 idr_remove(&info->protocols, protocol_id);
2257
2258 scmi_protocol_put(pi->proto);
2259
2260 devres_release_group(handle->dev, gid);
2261 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2262 protocol_id);
2263 }
2264
2265 out:
2266 mutex_unlock(&info->protocols_mtx);
2267 }
2268
2269 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2270 u8 *prot_imp)
2271 {
2272 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2273 struct scmi_info *info = handle_to_scmi_info(pi->handle);
2274
2275 info->protocols_imp = prot_imp;
2276 }
2277
2278 static bool
2279 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2280 {
2281 int i;
2282 struct scmi_info *info = handle_to_scmi_info(handle);
2283 struct scmi_revision_info *rev = handle->version;
2284
2285 if (!info->protocols_imp)
2286 return false;
2287
2288 for (i = 0; i < rev->num_protocols; i++)
2289 if (info->protocols_imp[i] == prot_id)
2290 return true;
2291 return false;
2292 }
2293
2294 struct scmi_protocol_devres {
2295 const struct scmi_handle *handle;
2296 u8 protocol_id;
2297 };
2298
2299 static void scmi_devm_release_protocol(struct device *dev, void *res)
2300 {
2301 struct scmi_protocol_devres *dres = res;
2302
2303 scmi_protocol_release(dres->handle, dres->protocol_id);
2304 }
2305
2306 static struct scmi_protocol_instance __must_check *
2307 scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2308 {
2309 struct scmi_protocol_instance *pi;
2310 struct scmi_protocol_devres *dres;
2311
2312 dres = devres_alloc(scmi_devm_release_protocol,
2313 sizeof(*dres), GFP_KERNEL);
2314 if (!dres)
2315 return ERR_PTR(-ENOMEM);
2316
2317 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2318 if (IS_ERR(pi)) {
2319 devres_free(dres);
2320 return pi;
2321 }
2322
2323 dres->handle = sdev->handle;
2324 dres->protocol_id = protocol_id;
2325 devres_add(&sdev->dev, dres);
2326
2327 return pi;
2328 }
2329
2330 /**
2331 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2332 * @sdev: A reference to an scmi_device whose embedded struct device is to
2333 * be used for devres accounting.
2334 * @protocol_id: The protocol being requested.
2335 * @ph: A pointer reference used to pass back the associated protocol handle.
2336 *
2337 * Get hold of a protocol accounting for its usage, possibly triggering its
2338 * initialization, and returning the protocol specific operations and related
2339 * protocol handle which will be used as first argument in most of the
2340 * protocols operations methods.
2341 * Being a devres based managed method, protocol hold will be automatically
2342 * released, and possibly de-initialized on last user, once the SCMI driver
2343 * owning the scmi_device is unbound from it.
2344 *
2345 * Return: A reference to the requested protocol operations or error.
2346 * Must be checked for errors by caller.
2347 */
2348 static const void __must_check *
2349 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2350 struct scmi_protocol_handle **ph)
2351 {
2352 struct scmi_protocol_instance *pi;
2353
2354 if (!ph)
2355 return ERR_PTR(-EINVAL);
2356
2357 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2358 if (IS_ERR(pi))
2359 return pi;
2360
2361 *ph = &pi->ph;
2362
2363 return pi->proto->ops;
2364 }
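
/*
 * Example: a hedged sketch of the consumer side, i.e. an SCMI driver
 * probe acquiring a protocol through the devres helper above; the clock
 * protocol and its ops type mirror existing in-tree users but are only
 * illustrative here.
 */
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	if (!sdev->handle)
		return -ENODEV;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* @ph is now the first argument to every clk_ops method */

	return 0;
}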
2365
2366 /**
2367 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2368 * @sdev: A reference to an scmi_device whose embedded struct device is to
2369 * be used for devres accounting.
2370 * @protocol_id: The protocol being requested.
2371 *
2372 * Get hold of a protocol accounting for its usage, possibly triggering its
2373 * initialization but without getting access to its protocol specific operations
2374 * and handle.
2375 *
2376 * Being a devres based managed method, protocol hold will be automatically
2377 * released, and possibly de-initialized on last user, once the SCMI driver
2378 * owning the scmi_device is unbound from it.
2379 *
2380 * Return: 0 on SUCCESS
2381 */
2382 static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2383 u8 protocol_id)
2384 {
2385 struct scmi_protocol_instance *pi;
2386
2387 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2388 if (IS_ERR(pi))
2389 return PTR_ERR(pi);
2390
2391 return 0;
2392 }
2393
2394 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2395 {
2396 struct scmi_protocol_devres *dres = res;
2397
2398 if (WARN_ON(!dres || !data))
2399 return 0;
2400
2401 return dres->protocol_id == *((u8 *)data);
2402 }
2403
2404 /**
2405 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2406 * @sdev: A reference to an scmi_device whose embedded struct device is to
2407 * be used for devres accounting.
2408 * @protocol_id: The protocol being requested.
2409 *
2410 * Explicitly release a protocol hold previously obtained calling the above
2411 * @scmi_devm_protocol_get.
2412 */
2413 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2414 {
2415 int ret;
2416
2417 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2418 scmi_devm_protocol_match, &protocol_id);
2419 WARN_ON(ret);
2420 }
2421
2422 /**
2423 * scmi_is_transport_atomic - Method to check if underlying transport for an
2424 * SCMI instance is configured as atomic.
2425 *
2426 * @handle: A reference to the SCMI platform instance.
2427 * @atomic_threshold: An optional return value for the currently configured
2428 * system wide threshold for atomic operations.
2429 *
2430 * Return: True if transport is configured as atomic
2431 */
2432 static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2433 unsigned int *atomic_threshold)
2434 {
2435 bool ret;
2436 struct scmi_info *info = handle_to_scmi_info(handle);
2437
2438 ret = info->desc->atomic_enabled &&
2439 is_transport_polling_capable(info->desc);
2440 if (ret && atomic_threshold)
2441 *atomic_threshold = info->desc->atomic_threshold;
2442
2443 return ret;
2444 }
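
/*
 * Example: a brief sketch of how a consumer would use the atomicity query
 * above to decide whether its own operations may run in atomic context.
 * EXAMPLE_TYPICAL_CMD_LATENCY_US is a hypothetical driver-chosen bound.
 */
#define EXAMPLE_TYPICAL_CMD_LATENCY_US	100	/* hypothetical */

static bool example_can_go_atomic(const struct scmi_handle *handle)
{
	unsigned int atomic_threshold_us;

	if (!handle->is_transport_atomic(handle, &atomic_threshold_us))
		return false;

	/* Only commands expected to complete within the threshold qualify */
	return atomic_threshold_us >= EXAMPLE_TYPICAL_CMD_LATENCY_US;
}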
2445
2446 /**
2447 * scmi_handle_get() - Get the SCMI handle for a device
2448 *
2449 * @dev: pointer to device for which we want SCMI handle
2450 *
2451 * NOTE: The function does not track individual clients of the framework
2452 * and is expected to be maintained by caller of SCMI protocol library.
2453 * scmi_handle_put must be balanced with successful scmi_handle_get
2454 *
2455 * Return: pointer to handle if successful, NULL on error
2456 */
2457 static struct scmi_handle *scmi_handle_get(struct device *dev)
2458 {
2459 struct list_head *p;
2460 struct scmi_info *info;
2461 struct scmi_handle *handle = NULL;
2462
2463 mutex_lock(&scmi_list_mutex);
2464 list_for_each(p, &scmi_list) {
2465 info = list_entry(p, struct scmi_info, node);
2466 if (dev->parent == info->dev) {
2467 info->users++;
2468 handle = &info->handle;
2469 break;
2470 }
2471 }
2472 mutex_unlock(&scmi_list_mutex);
2473
2474 return handle;
2475 }
2476
2477 /**
2478 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2479 *
2480 * @handle: handle acquired by scmi_handle_get
2481 *
2482 * NOTE: The function does not track individual clients of the framework
2483 * and is expected to be maintained by caller of SCMI protocol library.
2484 * scmi_handle_put must be balanced with successful scmi_handle_get
2485 *
2486 * Return: 0 if successfully released;
2487 * -EINVAL if a NULL handle was passed.
2488 */
2489 static int scmi_handle_put(const struct scmi_handle *handle)
2490 {
2491 struct scmi_info *info;
2492
2493 if (!handle)
2494 return -EINVAL;
2495
2496 info = handle_to_scmi_info(handle);
2497 mutex_lock(&scmi_list_mutex);
2498 if (!WARN_ON(!info->users))
2499 info->users--;
2500 mutex_unlock(&scmi_list_mutex);
2501
2502 return 0;
2503 }
2504
2505 static void scmi_device_link_add(struct device *consumer,
2506 struct device *supplier)
2507 {
2508 struct device_link *link;
2509
2510 link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2511
2512 WARN_ON(!link);
2513 }
2514
2515 static void scmi_set_handle(struct scmi_device *scmi_dev)
2516 {
2517 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2518 if (scmi_dev->handle)
2519 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2520 }
2521
2522 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2523 struct scmi_xfers_info *info)
2524 {
2525 int i;
2526 struct scmi_xfer *xfer;
2527 struct device *dev = sinfo->dev;
2528 const struct scmi_desc *desc = sinfo->desc;
2529
2530 /* Pre-allocated messages, no more than what hdr.seq can support */
2531 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2532 dev_err(dev,
2533 "Invalid maximum messages %d, not in range [1 - %lu]\n",
2534 info->max_msg, MSG_TOKEN_MAX);
2535 return -EINVAL;
2536 }
2537
2538 hash_init(info->pending_xfers);
2539
2540 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2541 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2542 GFP_KERNEL);
2543 if (!info->xfer_alloc_table)
2544 return -ENOMEM;
2545
2546 /*
2547 * Preallocate a number of xfers equal to max inflight messages,
2548 * pre-initialize the buffer pointer to pre-allocated buffers and
2549 * attach all of them to the free list
2550 */
2551 INIT_HLIST_HEAD(&info->free_xfers);
2552 for (i = 0; i < info->max_msg; i++) {
2553 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2554 if (!xfer)
2555 return -ENOMEM;
2556
2557 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2558 GFP_KERNEL);
2559 if (!xfer->rx.buf)
2560 return -ENOMEM;
2561
2562 xfer->tx.buf = xfer->rx.buf;
2563 init_completion(&xfer->done);
2564 spin_lock_init(&xfer->lock);
2565
2566 /* Add initialized xfer to the free list */
2567 hlist_add_head(&xfer->node, &info->free_xfers);
2568 }
2569
2570 spin_lock_init(&info->xfer_lock);
2571
2572 return 0;
2573 }
2574
2575 static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2576 {
2577 const struct scmi_desc *desc = sinfo->desc;
2578
2579 if (!desc->ops->get_max_msg) {
2580 sinfo->tx_minfo.max_msg = desc->max_msg;
2581 sinfo->rx_minfo.max_msg = desc->max_msg;
2582 } else {
2583 struct scmi_chan_info *base_cinfo;
2584
2585 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2586 if (!base_cinfo)
2587 return -EINVAL;
2588 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2589
2590 /* RX channel is optional so can be skipped */
2591 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2592 if (base_cinfo)
2593 sinfo->rx_minfo.max_msg =
2594 desc->ops->get_max_msg(base_cinfo);
2595 }
2596
2597 return 0;
2598 }
2599
2600 static int scmi_xfer_info_init(struct scmi_info *sinfo)
2601 {
2602 int ret;
2603
2604 ret = scmi_channels_max_msg_configure(sinfo);
2605 if (ret)
2606 return ret;
2607
2608 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2609 if (!ret && !idr_is_empty(&sinfo->rx_idr))
2610 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2611
2612 return ret;
2613 }
2614
2615 static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2616 int prot_id, bool tx)
2617 {
2618 int ret, idx;
2619 char name[32];
2620 struct scmi_chan_info *cinfo;
2621 struct idr *idr;
2622 struct scmi_device *tdev = NULL;
2623
2624 /* Transmit channel is first entry i.e. index 0 */
2625 idx = tx ? 0 : 1;
2626 idr = tx ? &info->tx_idr : &info->rx_idr;
2627
2628 if (!info->desc->ops->chan_available(of_node, idx)) {
2629 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2630 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2631 return -EINVAL;
2632 goto idr_alloc;
2633 }
2634
2635 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2636 if (!cinfo)
2637 return -ENOMEM;
2638
2639 cinfo->is_p2a = !tx;
2640 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2641 cinfo->max_msg_size = info->desc->max_msg_size;
2642
2643 /* Create a unique name for this transport device */
2644 snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2645 idx ? "rx" : "tx", prot_id);
2646 /* Create a uniquely named, dedicated transport device for this chan */
2647 tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2648 if (!tdev) {
2649 dev_err(info->dev,
2650 "failed to create transport device (%s)\n", name);
2651 devm_kfree(info->dev, cinfo);
2652 return -EINVAL;
2653 }
2654 of_node_get(of_node);
2655
2656 cinfo->id = prot_id;
2657 cinfo->dev = &tdev->dev;
2658 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2659 if (ret) {
2660 of_node_put(of_node);
2661 scmi_device_destroy(info->dev, prot_id, name);
2662 devm_kfree(info->dev, cinfo);
2663 return ret;
2664 }
2665
2666 if (tx && is_polling_required(cinfo, info->desc)) {
2667 if (is_transport_polling_capable(info->desc))
2668 dev_info(&tdev->dev,
2669 "Enabled polling mode TX channel - prot_id:%d\n",
2670 prot_id);
2671 else
2672 dev_warn(&tdev->dev,
2673 "Polling mode NOT supported by transport.\n");
2674 }
2675
2676 idr_alloc:
2677 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2678 if (ret != prot_id) {
2679 dev_err(info->dev,
2680 "unable to allocate SCMI idr slot err %d\n", ret);
2681 /* Destroy channel and device only if created by this call. */
2682 if (tdev) {
2683 of_node_put(of_node);
2684 scmi_device_destroy(info->dev, prot_id, name);
2685 devm_kfree(info->dev, cinfo);
2686 }
2687 return ret;
2688 }
2689
2690 cinfo->handle = &info->handle;
2691 return 0;
2692 }
2693
2694 static inline int
2695 scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2696 int prot_id)
2697 {
2698 int ret = scmi_chan_setup(info, of_node, prot_id, true);
2699
2700 if (!ret) {
2701 /* Rx is optional, report only memory errors */
2702 ret = scmi_chan_setup(info, of_node, prot_id, false);
2703 if (ret && ret != -ENOMEM)
2704 ret = 0;
2705 }
2706
2707 if (ret)
2708 dev_err(info->dev,
2709 "failed to setup channel for protocol:0x%X\n", prot_id);
2710
2711 return ret;
2712 }
2713
2714 /**
2715 * scmi_channels_setup - Helper to initialize all required channels
2716 *
2717 * @info: The SCMI instance descriptor.
2718 *
2719 * Initialize all the channels described in the DT against the underlying
2720 * configured transport using custom defined dedicated devices instead of
2721 * borrowing devices from the SCMI drivers; this way channels are initialized
2722 * upfront during core SCMI stack probing and are no longer coupled with SCMI
2723 * devices used by SCMI drivers.
2724 *
2725 * Note that, even though a pair of TX/RX channels is associated to each
2726 * protocol defined in the DT, a distinct freshly initialized channel is
2727 * created only if the DT node for the protocol at hand describes a dedicated
2728 * channel: in all the other cases the common BASE protocol channel is reused.
2729 *
2730 * Return: 0 on Success
2731 */
2732 static int scmi_channels_setup(struct scmi_info *info)
2733 {
2734 int ret;
2735 struct device_node *top_np = info->dev->of_node;
2736
2737 /* Initialize a common generic channel at first */
2738 ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2739 if (ret)
2740 return ret;
2741
2742 for_each_available_child_of_node_scoped(top_np, child) {
2743 u32 prot_id;
2744
2745 if (of_property_read_u32(child, "reg", &prot_id))
2746 continue;
2747
2748 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2749 dev_err(info->dev,
2750 "Out of range protocol %d\n", prot_id);
2751
2752 ret = scmi_txrx_setup(info, child, prot_id);
2753 if (ret)
2754 return ret;
2755 }
2756
2757 return 0;
2758 }
2759
2760 static int scmi_chan_destroy(int id, void *p, void *idr)
2761 {
2762 struct scmi_chan_info *cinfo = p;
2763
2764 if (cinfo->dev) {
2765 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2766 struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2767
2768 of_node_put(cinfo->dev->of_node);
2769 scmi_device_destroy(info->dev, id, sdev->name);
2770 cinfo->dev = NULL;
2771 }
2772
2773 idr_remove(idr, id);
2774
2775 return 0;
2776 }
2777
2778 static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2779 {
2780 /* At first free all channels at the transport layer ... */
2781 idr_for_each(idr, info->desc->ops->chan_free, idr);
2782
2783 /* ...then destroy all underlying devices */
2784 idr_for_each(idr, scmi_chan_destroy, idr);
2785
2786 idr_destroy(idr);
2787 }
2788
2789 static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2790 {
2791 scmi_cleanup_channels(info, &info->tx_idr);
2792
2793 scmi_cleanup_channels(info, &info->rx_idr);
2794 }
2795
2796 static int scmi_bus_notifier(struct notifier_block *nb,
2797 unsigned long action, void *data)
2798 {
2799 struct scmi_info *info = bus_nb_to_scmi_info(nb);
2800 struct scmi_device *sdev = to_scmi_dev(data);
2801
2802 /* Skip transport devices and devices of different SCMI instances */
2803 if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
2804 sdev->dev.parent != info->dev)
2805 return NOTIFY_DONE;
2806
2807 switch (action) {
2808 case BUS_NOTIFY_BIND_DRIVER:
2809 /* setup handle now as the transport is ready */
2810 scmi_set_handle(sdev);
2811 break;
2812 case BUS_NOTIFY_UNBOUND_DRIVER:
2813 scmi_handle_put(sdev->handle);
2814 sdev->handle = NULL;
2815 break;
2816 default:
2817 return NOTIFY_DONE;
2818 }
2819
2820 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2821 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2822 "about to be BOUND." : "UNBOUND.");
2823
2824 return NOTIFY_OK;
2825 }
2826
2827 static int scmi_device_request_notifier(struct notifier_block *nb,
2828 unsigned long action, void *data)
2829 {
2830 struct device_node *np;
2831 struct scmi_device_id *id_table = data;
2832 struct scmi_info *info = req_nb_to_scmi_info(nb);
2833
2834 np = idr_find(&info->active_protocols, id_table->protocol_id);
2835 if (!np)
2836 return NOTIFY_DONE;
2837
2838 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2839 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2840 id_table->name, id_table->protocol_id);
2841
2842 switch (action) {
2843 case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2844 scmi_create_protocol_devices(np, info, id_table->protocol_id,
2845 id_table->name);
2846 break;
2847 case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2848 scmi_destroy_protocol_devices(info, id_table->protocol_id,
2849 id_table->name);
2850 break;
2851 default:
2852 return NOTIFY_DONE;
2853 }
2854
2855 return NOTIFY_OK;
2856 }
2857
2858 static const char * const dbg_counter_strs[] = {
2859 "sent_ok",
2860 "sent_fail",
2861 "sent_fail_polling_unsupported",
2862 "sent_fail_channel_not_found",
2863 "response_ok",
2864 "notification_ok",
2865 "delayed_response_ok",
2866 "xfers_response_timeout",
2867 "xfers_response_polled_timeout",
2868 "response_polled_ok",
2869 "err_msg_unexpected",
2870 "err_msg_invalid",
2871 "err_msg_nomem",
2872 "err_protocol",
2873 };
2874
2875 static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
2876 size_t count, loff_t *ppos)
2877 {
2878 struct scmi_debug_info *dbg = filp->private_data;
2879
2880 for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
2881 atomic_set(&dbg->counters[i], 0);
2882
2883 return count;
2884 }
2885
2886 static const struct file_operations fops_reset_counts = {
2887 .owner = THIS_MODULE,
2888 .open = simple_open,
2889 .write = reset_all_on_write,
2890 };
2891
2892 static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
2893 struct dentry *trans)
2894 {
2895 struct dentry *counters;
2896 int idx;
2897
2898 counters = debugfs_create_dir("counters", trans);
2899
2900 for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
2901 debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
2902 &dbg->counters[idx]);
2903
2904 debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
2905 }
2906
2907 static void scmi_debugfs_common_cleanup(void *d)
2908 {
2909 struct scmi_debug_info *dbg = d;
2910
2911 if (!dbg)
2912 return;
2913
2914 debugfs_remove_recursive(dbg->top_dentry);
2915 kfree(dbg->name);
2916 kfree(dbg->type);
2917 }
2918
2919 static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2920 {
2921 char top_dir[16];
2922 struct dentry *trans, *top_dentry;
2923 struct scmi_debug_info *dbg;
2924 const char *c_ptr = NULL;
2925
2926 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2927 if (!dbg)
2928 return NULL;
2929
2930 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2931 if (!dbg->name) {
2932 devm_kfree(info->dev, dbg);
2933 return NULL;
2934 }
2935
2936 of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2937 dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2938 if (!dbg->type) {
2939 kfree(dbg->name);
2940 devm_kfree(info->dev, dbg);
2941 return NULL;
2942 }
2943
2944 snprintf(top_dir, 16, "%d", info->id);
2945 top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2946 trans = debugfs_create_dir("transport", top_dentry);
2947
2948 dbg->is_atomic = info->desc->atomic_enabled &&
2949 is_transport_polling_capable(info->desc);
2950
2951 debugfs_create_str("instance_name", 0400, top_dentry,
2952 (char **)&dbg->name);
2953
2954 debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2955 (u32 *)&info->desc->atomic_threshold);
2956
2957 debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2958
2959 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2960
2961 debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
2962 (u32 *)&info->desc->max_rx_timeout_ms);
2963
2964 debugfs_create_u32("max_msg_size", 0400, trans,
2965 (u32 *)&info->desc->max_msg_size);
2966
2967 debugfs_create_u32("tx_max_msg", 0400, trans,
2968 (u32 *)&info->tx_minfo.max_msg);
2969
2970 debugfs_create_u32("rx_max_msg", 0400, trans,
2971 (u32 *)&info->rx_minfo.max_msg);
2972
2973 if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
2974 scmi_debugfs_counters_setup(dbg, trans);
2975
2976 dbg->top_dentry = top_dentry;
2977
2978 if (devm_add_action_or_reset(info->dev,
2979 scmi_debugfs_common_cleanup, dbg))
2980 return NULL;
2981
2982 return dbg;
2983 }
2984
2985 static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
2986 {
2987 int id, num_chans = 0, ret = 0;
2988 struct scmi_chan_info *cinfo;
2989 u8 channels[SCMI_MAX_CHANNELS] = {};
2990 DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
2991
2992 if (!info->dbg)
2993 return -EINVAL;
2994
2995 /* Enumerate all channels to collect their ids */
2996 idr_for_each_entry(&info->tx_idr, cinfo, id) {
2997 /*
2998 * Cannot happen, but be defensive.
2999 * Zero as num_chans is ok, warn and carry on.
3000 */
3001 if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
3002 dev_warn(info->dev,
3003 "SCMI RAW - Error enumerating channels\n");
3004 break;
3005 }
3006
3007 if (!test_bit(cinfo->id, protos)) {
3008 channels[num_chans++] = cinfo->id;
3009 set_bit(cinfo->id, protos);
3010 }
3011 }
3012
3013 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3014 info->id, channels, num_chans,
3015 info->desc, info->tx_minfo.max_msg);
3016 if (IS_ERR(info->raw)) {
3017 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
3018 ret = PTR_ERR(info->raw);
3019 info->raw = NULL;
3020 }
3021
3022 return ret;
3023 }
3024
3025 static const struct scmi_desc *scmi_transport_setup(struct device *dev)
3026 {
3027 struct scmi_transport *trans;
3028 int ret;
3029
3030 trans = dev_get_platdata(dev);
3031 if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
3032 return NULL;
3033
3034 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3035 dev_err(dev,
3036 "Adding link to supplier transport device failed\n");
3037 return NULL;
3038 }
3039
3040 /* Provide core transport ops */
3041 *trans->core_ops = &scmi_trans_core_ops;
3042
3043 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3044
3045 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3046 &trans->desc->max_rx_timeout_ms);
3047 if (ret && ret != -EINVAL)
3048 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3049
3050 ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3051 &trans->desc->max_msg_size);
3052 if (ret && ret != -EINVAL)
3053 dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3054
3055 ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3056 &trans->desc->max_msg);
3057 if (ret && ret != -EINVAL)
3058 dev_err(dev, "Malformed arm,max-msg DT property.\n");
3059
3060 dev_info(dev,
3061 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3062 trans->desc->max_rx_timeout_ms, trans->desc->max_msg_size,
3063 trans->desc->max_msg);
3064
3065 /* System wide atomic threshold for atomic ops .. if any */
3066 if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3067 &trans->desc->atomic_threshold))
3068 dev_info(dev,
3069 "SCMI System wide atomic threshold set to %u us\n",
3070 trans->desc->atomic_threshold);
3071
3072 return trans->desc;
3073 }
3074
3075 static int scmi_probe(struct platform_device *pdev)
3076 {
3077 int ret;
3078 char *err_str = "probe failure\n";
3079 struct scmi_handle *handle;
3080 const struct scmi_desc *desc;
3081 struct scmi_info *info;
3082 bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
3083 struct device *dev = &pdev->dev;
3084 struct device_node *child, *np = dev->of_node;
3085
3086 desc = scmi_transport_setup(dev);
3087 if (!desc) {
3088 err_str = "transport invalid\n";
3089 ret = -EINVAL;
3090 goto out_err;
3091 }
3092
3093 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3094 if (!info)
3095 return -ENOMEM;
3096
3097 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3098 if (info->id < 0)
3099 return info->id;
3100
3101 info->dev = dev;
3102 info->desc = desc;
3103 info->bus_nb.notifier_call = scmi_bus_notifier;
3104 info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3105 INIT_LIST_HEAD(&info->node);
3106 idr_init(&info->protocols);
3107 mutex_init(&info->protocols_mtx);
3108 idr_init(&info->active_protocols);
3109 mutex_init(&info->devreq_mtx);
3110
3111 platform_set_drvdata(pdev, info);
3112 idr_init(&info->tx_idr);
3113 idr_init(&info->rx_idr);
3114
3115 handle = &info->handle;
3116 handle->dev = info->dev;
3117 handle->version = &info->version;
3118 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3119 handle->devm_protocol_get = scmi_devm_protocol_get;
3120 handle->devm_protocol_put = scmi_devm_protocol_put;
3121 handle->is_transport_atomic = scmi_is_transport_atomic;
3122
3123 /* Setup all channels described in the DT at first */
3124 ret = scmi_channels_setup(info);
3125 if (ret) {
3126 err_str = "failed to setup channels\n";
3127 goto clear_ida;
3128 }
3129
3130 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3131 if (ret) {
3132 err_str = "failed to register bus notifier\n";
3133 goto clear_txrx_setup;
3134 }
3135
3136 ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
3137 &info->dev_req_nb);
3138 if (ret) {
3139 err_str = "failed to register device notifier\n";
3140 goto clear_bus_notifier;
3141 }
3142
3143 ret = scmi_xfer_info_init(info);
3144 if (ret) {
3145 err_str = "failed to init xfers pool\n";
3146 goto clear_dev_req_notifier;
3147 }
3148
3149 if (scmi_top_dentry) {
3150 info->dbg = scmi_debugfs_common_setup(info);
3151 if (!info->dbg)
3152 dev_warn(dev, "Failed to setup SCMI debugfs.\n");
3153
3154 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
3155 ret = scmi_debugfs_raw_mode_setup(info);
3156 if (!coex) {
3157 if (ret)
3158 goto clear_dev_req_notifier;
3159
3160 /* Bail out anyway when coex disabled. */
3161 return 0;
3162 }
3163
3164 /* Coex enabled, carry on in any case. */
3165 dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
3166 }
3167 }
3168
3169 if (scmi_notification_init(handle))
3170 dev_err(dev, "SCMI Notifications NOT available.\n");
3171
3172 if (info->desc->atomic_enabled &&
3173 !is_transport_polling_capable(info->desc))
3174 dev_err(dev,
3175 "Transport is not polling capable. Atomic mode not supported.\n");
3176
3177 /*
3178 * Trigger SCMI Base protocol initialization.
3179 * It's mandatory and won't ever be released/deinitialized until the
3180 * SCMI stack is shutdown/unloaded as a whole.
3181 */
3182 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
3183 if (ret) {
3184 err_str = "unable to communicate with SCMI\n";
3185 if (coex) {
3186 dev_err(dev, "%s", err_str);
3187 return 0;
3188 }
3189 goto notification_exit;
3190 }
3191
3192 mutex_lock(&scmi_list_mutex);
3193 list_add_tail(&info->node, &scmi_list);
3194 mutex_unlock(&scmi_list_mutex);
3195
3196 for_each_available_child_of_node(np, child) {
3197 u32 prot_id;
3198
3199 if (of_property_read_u32(child, "reg", &prot_id))
3200 continue;
3201
3202 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
3203 dev_err(dev, "Out of range protocol %d\n", prot_id);
3204
3205 if (!scmi_is_protocol_implemented(handle, prot_id)) {
3206 dev_err(dev, "SCMI protocol %d not implemented\n",
3207 prot_id);
3208 continue;
3209 }
3210
3211 /*
3212 * Save this valid DT protocol descriptor amongst
3213 * @active_protocols for this SCMI instance.
3214 */
3215 ret = idr_alloc(&info->active_protocols, child,
3216 prot_id, prot_id + 1, GFP_KERNEL);
3217 if (ret != prot_id) {
3218 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
3219 prot_id);
3220 continue;
3221 }
3222
3223 of_node_get(child);
3224 scmi_create_protocol_devices(child, info, prot_id, NULL);
3225 }
3226
3227 return 0;
3228
3229 notification_exit:
3230 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3231 scmi_raw_mode_cleanup(info->raw);
3232 scmi_notification_exit(&info->handle);
3233 clear_dev_req_notifier:
3234 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3235 &info->dev_req_nb);
3236 clear_bus_notifier:
3237 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3238 clear_txrx_setup:
3239 scmi_cleanup_txrx_channels(info);
3240 clear_ida:
3241 ida_free(&scmi_id, info->id);
3242
3243 out_err:
3244 return dev_err_probe(dev, ret, "%s", err_str);
3245 }
3246
3247 static void scmi_remove(struct platform_device *pdev)
3248 {
3249 int id;
3250 struct scmi_info *info = platform_get_drvdata(pdev);
3251 struct device_node *child;
3252
3253 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3254 scmi_raw_mode_cleanup(info->raw);
3255
3256 mutex_lock(&scmi_list_mutex);
3257 if (info->users)
3258 dev_warn(&pdev->dev,
3259 "Still active SCMI users will be forcibly unbound.\n");
3260 list_del(&info->node);
3261 mutex_unlock(&scmi_list_mutex);
3262
3263 scmi_notification_exit(&info->handle);
3264
3265 mutex_lock(&info->protocols_mtx);
3266 idr_destroy(&info->protocols);
3267 mutex_unlock(&info->protocols_mtx);
3268
3269 idr_for_each_entry(&info->active_protocols, child, id)
3270 of_node_put(child);
3271 idr_destroy(&info->active_protocols);
3272
3273 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3274 &info->dev_req_nb);
3275 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3276
3277 /* Safe to free channels since no more users */
3278 scmi_cleanup_txrx_channels(info);
3279
3280 ida_free(&scmi_id, info->id);
3281 }
3282
3283 static ssize_t protocol_version_show(struct device *dev,
3284 struct device_attribute *attr, char *buf)
3285 {
3286 struct scmi_info *info = dev_get_drvdata(dev);
3287
3288 return sprintf(buf, "%u.%u\n", info->version.major_ver,
3289 info->version.minor_ver);
3290 }
3291 static DEVICE_ATTR_RO(protocol_version);
3292
3293 static ssize_t firmware_version_show(struct device *dev,
3294 struct device_attribute *attr, char *buf)
3295 {
3296 struct scmi_info *info = dev_get_drvdata(dev);
3297
3298 return sprintf(buf, "0x%x\n", info->version.impl_ver);
3299 }
3300 static DEVICE_ATTR_RO(firmware_version);
3301
3302 static ssize_t vendor_id_show(struct device *dev,
3303 struct device_attribute *attr, char *buf)
3304 {
3305 struct scmi_info *info = dev_get_drvdata(dev);
3306
3307 return sprintf(buf, "%s\n", info->version.vendor_id);
3308 }
3309 static DEVICE_ATTR_RO(vendor_id);
3310
3311 static ssize_t sub_vendor_id_show(struct device *dev,
3312 struct device_attribute *attr, char *buf)
3313 {
3314 struct scmi_info *info = dev_get_drvdata(dev);
3315
3316 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3317 }
3318 static DEVICE_ATTR_RO(sub_vendor_id);
3319
3320 static struct attribute *versions_attrs[] = {
3321 &dev_attr_firmware_version.attr,
3322 &dev_attr_protocol_version.attr,
3323 &dev_attr_vendor_id.attr,
3324 &dev_attr_sub_vendor_id.attr,
3325 NULL,
3326 };
3327 ATTRIBUTE_GROUPS(versions);
3328
3329 static struct platform_driver scmi_driver = {
3330 .driver = {
3331 .name = "arm-scmi",
3332 .suppress_bind_attrs = true,
3333 .dev_groups = versions_groups,
3334 },
3335 .probe = scmi_probe,
3336 .remove = scmi_remove,
3337 };
3338
3339 static struct dentry *scmi_debugfs_init(void)
3340 {
3341 struct dentry *d;
3342
3343 d = debugfs_create_dir("scmi", NULL);
3344 if (IS_ERR(d)) {
3345 pr_err("Could NOT create SCMI top dentry.\n");
3346 return NULL;
3347 }
3348
3349 return d;
3350 }
3351
3352 static int __init scmi_driver_init(void)
3353 {
3354 /* Bail out if no SCMI transport was configured */
3355 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3356 return -EINVAL;
3357
3358 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
3359 scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
3360
3361 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
3362 scmi_trans_core_ops.msg = scmi_message_operations_get();
3363
3364 if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3365 scmi_top_dentry = scmi_debugfs_init();
3366
3367 scmi_base_register();
3368
3369 scmi_clock_register();
3370 scmi_perf_register();
3371 scmi_power_register();
3372 scmi_reset_register();
3373 scmi_sensors_register();
3374 scmi_voltage_register();
3375 scmi_system_register();
3376 scmi_powercap_register();
3377 scmi_pinctrl_register();
3378
3379 return platform_driver_register(&scmi_driver);
3380 }
3381 module_init(scmi_driver_init);
3382
3383 static void __exit scmi_driver_exit(void)
3384 {
3385 scmi_base_unregister();
3386
3387 scmi_clock_unregister();
3388 scmi_perf_unregister();
3389 scmi_power_unregister();
3390 scmi_reset_unregister();
3391 scmi_sensors_unregister();
3392 scmi_voltage_unregister();
3393 scmi_system_unregister();
3394 scmi_powercap_unregister();
3395 scmi_pinctrl_unregister();
3396
3397 platform_driver_unregister(&scmi_driver);
3398
3399 debugfs_remove_recursive(scmi_top_dentry);
3400 }
3401 module_exit(scmi_driver_exit);
3402
3403 MODULE_ALIAS("platform:arm-scmi");
3404 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3405 MODULE_DESCRIPTION("ARM SCMI protocol driver");
3406 MODULE_LICENSE("GPL v2");
3407