xref: /linux/drivers/firmware/arm_scmi/driver.c (revision bd4af432cc71b5fbfe4833510359a6ad3ada250d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Message Protocol driver
4  *
5  * The SCMI Message Protocol is used between the System Control Processor
6  * (SCP) and the Application Processors (AP). The Message Handling Unit
7  * (MHU) provides a mechanism for inter-processor communication between
8  * the SCP's Cortex-M3 and the AP.
9  *
10  * The SCP offers control and management of core/cluster power states,
11  * DVFS for the various power domains including the cores/clusters,
12  * configuration of certain system clocks, thermal sensors and many more.
13  *
14  * Copyright (C) 2018 ARM Ltd.
15  */
16 
17 #include <linux/bitmap.h>
18 #include <linux/export.h>
19 #include <linux/io.h>
20 #include <linux/kernel.h>
21 #include <linux/ktime.h>
22 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/of_device.h>
25 #include <linux/processor.h>
26 #include <linux/slab.h>
27 
28 #include "common.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/scmi.h>
32 
33 enum scmi_error_codes {
34 	SCMI_SUCCESS = 0,	/* Success */
35 	SCMI_ERR_SUPPORT = -1,	/* Not supported */
36 	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
37 	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
38 	SCMI_ERR_ENTRY = -4,	/* Not found */
39 	SCMI_ERR_RANGE = -5,	/* Value out of range */
40 	SCMI_ERR_BUSY = -6,	/* Device busy */
41 	SCMI_ERR_COMMS = -7,	/* Communication Error */
42 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
43 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
44 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
45 	SCMI_ERR_MAX
46 };
47 
48 /* List of all SCMI devices active in system */
49 static LIST_HEAD(scmi_list);
50 /* Protection for the entire list */
51 static DEFINE_MUTEX(scmi_list_mutex);
52 /* Track the unique id for the transfers for debug & profiling purposes */
53 static atomic_t transfer_last_id;
54 
55 /**
56  * struct scmi_xfers_info - Structure to manage transfer information
57  *
58  * @xfer_block: Preallocated Message array
59  * @xfer_alloc_table: Bitmap table for allocated messages.
60  *	Index of this bitmap table is also used for message
61  *	sequence identifier.
62  * @xfer_lock: Protection for message allocation
63  */
64 struct scmi_xfers_info {
65 	struct scmi_xfer *xfer_block;
66 	unsigned long *xfer_alloc_table;
67 	spinlock_t xfer_lock;
68 };
69 
70 /**
71  * struct scmi_info - Structure representing a SCMI instance
72  *
73  * @dev: Device pointer
74  * @desc: SoC description for this instance
75  * @version: SCMI revision information containing protocol version,
76  *	implementation version and (sub-)vendor identification.
77  * @handle: Instance of SCMI handle to send to clients
78  * @tx_minfo: Universal Transmit Message management info
79  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
80  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
81  * @protocols_imp: List of protocols implemented, currently maximum of
82  *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
83  * @node: List head
84  * @users: Number of users of this instance
85  */
86 struct scmi_info {
87 	struct device *dev;
88 	const struct scmi_desc *desc;
89 	struct scmi_revision_info version;
90 	struct scmi_handle handle;
91 	struct scmi_xfers_info tx_minfo;
92 	struct idr tx_idr;
93 	struct idr rx_idr;
94 	u8 *protocols_imp;
95 	struct list_head node;
96 	int users;
97 };
98 
99 #define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
100 
101 static const int scmi_linux_errmap[] = {
102 	/* better than a switch case as long as the return values are contiguous */
103 	0,			/* SCMI_SUCCESS */
104 	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
105 	-EINVAL,		/* SCMI_ERR_PARAMS */
106 	-EACCES,		/* SCMI_ERR_ACCESS */
107 	-ENOENT,		/* SCMI_ERR_ENTRY */
108 	-ERANGE,		/* SCMI_ERR_RANGE */
109 	-EBUSY,			/* SCMI_ERR_BUSY */
110 	-ECOMM,			/* SCMI_ERR_COMMS */
111 	-EIO,			/* SCMI_ERR_GENERIC */
112 	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
113 	-EPROTO,		/* SCMI_ERR_PROTOCOL */
114 };
115 
116 static inline int scmi_to_linux_errno(int errno)
117 {
118 	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
119 		return scmi_linux_errmap[-errno];
120 	return -EIO;
121 }
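
/*
 * Illustrative sketch, not compiled into the driver: how a caller checks
 * the firmware status that fetch_response() stores in xfer->hdr.status,
 * mirroring the handling at the end of scmi_do_xfer() below. The helper
 * name is hypothetical.
 */
#if 0
static int scmi_example_check_status(struct scmi_xfer *xfer)
{
	if (xfer->hdr.status)	/* e.g. SCMI_ERR_SUPPORT maps to -EOPNOTSUPP */
		return scmi_to_linux_errno(xfer->hdr.status);

	return 0;
}
#endif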
122 
123 /**
124  * scmi_dump_header_dbg() - Helper to dump a message header.
125  *
126  * @dev: Device pointer corresponding to the SCMI entity
127  * @hdr: pointer to header.
128  */
129 static inline void scmi_dump_header_dbg(struct device *dev,
130 					struct scmi_msg_hdr *hdr)
131 {
132 	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
133 		hdr->id, hdr->seq, hdr->protocol_id);
134 }
135 
136 /**
137  * scmi_xfer_get() - Allocate one message
138  *
139  * @handle: Pointer to SCMI entity handle
140  * @minfo: Pointer to Tx/Rx Message management info based on channel type
141  *
142  * Helper function used by the various message functions exposed to
143  * clients of this driver to allocate a message slot.
144  *
145  * This function does not sleep; if no free slot is available it fails
146  * with -ENOMEM. It briefly holds a spinlock to preserve the integrity
147  * of the internal data structures.
148  *
149  * Return: Allocated scmi_xfer on success, an ERR_PTR otherwise.
150  */
151 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
152 				       struct scmi_xfers_info *minfo)
153 {
154 	u16 xfer_id;
155 	struct scmi_xfer *xfer;
156 	unsigned long flags, bit_pos;
157 	struct scmi_info *info = handle_to_scmi_info(handle);
158 
159 	/* Keep the locked section as small as possible */
160 	spin_lock_irqsave(&minfo->xfer_lock, flags);
161 	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
162 				      info->desc->max_msg);
163 	if (bit_pos == info->desc->max_msg) {
164 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
165 		return ERR_PTR(-ENOMEM);
166 	}
167 	set_bit(bit_pos, minfo->xfer_alloc_table);
168 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
169 
170 	xfer_id = bit_pos;
171 
172 	xfer = &minfo->xfer_block[xfer_id];
173 	xfer->hdr.seq = xfer_id;
174 	reinit_completion(&xfer->done);
175 	xfer->transfer_id = atomic_inc_return(&transfer_last_id);
176 
177 	return xfer;
178 }
179 
180 /**
181  * __scmi_xfer_put() - Release a message
182  *
183  * @minfo: Pointer to Tx/Rx Message management info based on channel type
184  * @xfer: message that was reserved by scmi_xfer_get
185  *
186  * This holds a spinlock to maintain integrity of internal data structures.
187  */
188 static void
189 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
190 {
191 	unsigned long flags;
192 
193 	/*
194 	 * Keep the locked section as small as possible
195 	 * NOTE: we might escape with smp_mb and no lock here..
196 	 * but just be conservative and symmetric.
197 	 */
198 	spin_lock_irqsave(&minfo->xfer_lock, flags);
199 	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
200 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
201 }
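
/*
 * Illustrative sketch, not compiled into the driver: the slot/token scheme
 * implemented by scmi_xfer_get()/__scmi_xfer_put() in miniature. The bit
 * index reserved in the allocation bitmap doubles as the sequence token
 * written to hdr.seq, so the Rx path can find the same slot again. The
 * names and the fixed size are hypothetical, and the real code holds
 * xfer_lock around the bitmap updates.
 */
#if 0
#define EXAMPLE_MAX_MSG		16

static DECLARE_BITMAP(example_alloc_table, EXAMPLE_MAX_MSG);

static int example_token_get(void)
{
	unsigned long bit = find_first_zero_bit(example_alloc_table,
						EXAMPLE_MAX_MSG);

	if (bit == EXAMPLE_MAX_MSG)
		return -ENOMEM;			/* all slots in flight */

	set_bit(bit, example_alloc_table);
	return bit;				/* becomes xfer->hdr.seq */
}

static void example_token_put(int token)
{
	clear_bit(token, example_alloc_table);
}
#endif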
202 
203 /**
204  * scmi_rx_callback() - callback for receiving messages
205  *
206  * @cinfo: SCMI channel info
207  * @msg_hdr: Message header
208  *
209  * Processes one received message, routes it to the matching transfer
210  * information and signals completion of that transfer.
211  *
212  * NOTE: This function will be invoked in IRQ context, hence it should
213  * do as little work as possible.
214  */
215 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
216 {
217 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
218 	struct scmi_xfers_info *minfo = &info->tx_minfo;
219 	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
220 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
221 	struct device *dev = cinfo->dev;
222 	struct scmi_xfer *xfer;
223 
224 	if (msg_type == MSG_TYPE_NOTIFICATION)
225 		return; /* Notifications not yet supported */
226 
227 	/* Are we even expecting this? */
228 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
229 		dev_err(dev, "message for %d is not expected!\n", xfer_id);
230 		return;
231 	}
232 
233 	xfer = &minfo->xfer_block[xfer_id];
234 
235 	scmi_dump_header_dbg(dev, &xfer->hdr);
236 
237 	info->desc->ops->fetch_response(cinfo, xfer);
238 
239 	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
240 			   xfer->hdr.protocol_id, xfer->hdr.seq,
241 			   msg_type);
242 
243 	if (msg_type == MSG_TYPE_DELAYED_RESP)
244 		complete(xfer->async_done);
245 	else
246 		complete(&xfer->done);
247 }
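
/*
 * Illustrative sketch, not compiled into the driver: how a transport's
 * receive path might feed this callback. Reading the 32-bit header from
 * the shared memory area is transport specific and shown here only as a
 * hypothetical stub; the core then recovers the waiting scmi_xfer from
 * the token embedded in that header via MSG_XTRACT_TOKEN().
 */
#if 0
static u32 example_read_shmem_header(struct scmi_chan_info *cinfo);

static void example_transport_rx(struct scmi_chan_info *cinfo)
{
	u32 msg_hdr = example_read_shmem_header(cinfo);

	scmi_rx_callback(cinfo, msg_hdr);
}
#endif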
248 
249 /**
250  * scmi_xfer_put() - Release a transmit message
251  *
252  * @handle: Pointer to SCMI entity handle
253  * @xfer: message that was reserved by scmi_xfer_get
254  */
255 void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
256 {
257 	struct scmi_info *info = handle_to_scmi_info(handle);
258 
259 	__scmi_xfer_put(&info->tx_minfo, xfer);
260 }
261 
262 #define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
263 
264 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
265 				      struct scmi_xfer *xfer, ktime_t stop)
266 {
267 	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
268 
269 	return info->desc->ops->poll_done(cinfo, xfer) ||
270 	       ktime_after(ktime_get(), stop);
271 }
272 
273 /**
274  * scmi_do_xfer() - Do one transfer
275  *
276  * @handle: Pointer to SCMI entity handle
277  * @xfer: Transfer to initiate and wait for response
278  *
279  * Return: 0 on success, the corresponding error if the transmit fails,
280  *	or -ETIMEDOUT if no response is received within the expected
281  *	time.
282  */
283 int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
284 {
285 	int ret;
286 	int timeout;
287 	struct scmi_info *info = handle_to_scmi_info(handle);
288 	struct device *dev = info->dev;
289 	struct scmi_chan_info *cinfo;
290 
291 	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
292 	if (unlikely(!cinfo))
293 		return -EINVAL;
294 
295 	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
296 			      xfer->hdr.protocol_id, xfer->hdr.seq,
297 			      xfer->hdr.poll_completion);
298 
299 	ret = info->desc->ops->send_message(cinfo, xfer);
300 	if (ret < 0) {
301 		dev_dbg(dev, "Failed to send message %d\n", ret);
302 		return ret;
303 	}
304 
305 	if (xfer->hdr.poll_completion) {
306 		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
307 
308 		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
309 
310 		if (ktime_before(ktime_get(), stop))
311 			info->desc->ops->fetch_response(cinfo, xfer);
312 		else
313 			ret = -ETIMEDOUT;
314 	} else {
315 		/* And we wait for the response. */
316 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
317 		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
318 			dev_err(dev, "timed out in resp(caller: %pS)\n",
319 				(void *)_RET_IP_);
320 			ret = -ETIMEDOUT;
321 		}
322 	}
323 
324 	if (!ret && xfer->hdr.status)
325 		ret = scmi_to_linux_errno(xfer->hdr.status);
326 
327 	if (info->desc->ops->mark_txdone)
328 		info->desc->ops->mark_txdone(cinfo, ret);
329 
330 	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
331 			    xfer->hdr.protocol_id, xfer->hdr.seq,
332 			    xfer->hdr.status);
333 
334 	return ret;
335 }
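
/*
 * Illustrative sketch, not compiled into the driver: a polled command.
 * With hdr.poll_completion set, scmi_do_xfer() busy-waits on the
 * transport's poll_done() for up to SCMI_MAX_POLL_TO_NS instead of
 * sleeping on the completion, as a latency-sensitive caller (for
 * instance a DVFS request) might do. The message and protocol
 * identifiers and the payload layout are hypothetical.
 */
#if 0
static int scmi_example_polled_cmd(const struct scmi_handle *handle, u32 domain)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, 0x7 /* hypothetical msg id */,
				 0x13 /* hypothetical protocol id */,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
	t->hdr.poll_completion = true;

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}
#endif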
336 
337 #define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
338 
339 /**
340  * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
341  *	response is received
342  *
343  * @handle: Pointer to SCMI entity handle
344  * @xfer: Transfer to initiate and wait for response
345  *
346  * Return: 0 on success, the corresponding error if the transmit fails,
347  *	or -ETIMEDOUT if no delayed response is received in time.
348  */
349 int scmi_do_xfer_with_response(const struct scmi_handle *handle,
350 			       struct scmi_xfer *xfer)
351 {
352 	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
353 	DECLARE_COMPLETION_ONSTACK(async_response);
354 
355 	xfer->async_done = &async_response;
356 
357 	ret = scmi_do_xfer(handle, xfer);
358 	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
359 		ret = -ETIMEDOUT;
360 
361 	xfer->async_done = NULL;
362 	return ret;
363 }
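
/*
 * Illustrative sketch, not compiled into the driver: an asynchronous
 * command. The platform acknowledges the request with an immediate
 * response and delivers the actual result later as a delayed response
 * (MSG_TYPE_DELAYED_RESP), which scmi_rx_callback() completes through
 * xfer->async_done. The identifiers and payload layout are hypothetical.
 */
#if 0
static int scmi_example_async_cmd(const struct scmi_handle *handle, u32 domain)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, 0x5 /* hypothetical msg id */,
				 0x16 /* hypothetical protocol id */,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);

	/* Waits up to SCMI_MAX_RESPONSE_TIMEOUT for the delayed response */
	ret = scmi_do_xfer_with_response(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}
#endif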
364 
365 /**
366  * scmi_xfer_get_init() - Allocate and initialise one message for transmit
367  *
368  * @handle: Pointer to SCMI entity handle
369  * @msg_id: Message identifier
370  * @prot_id: Protocol identifier for the message
371  * @tx_size: transmit message size
372  * @rx_size: receive message size
373  * @p: pointer to the allocated and initialised message
374  *
375  * This function allocates the message using scmi_xfer_get() and
376  * initialises the header.
377  *
378  * Return: 0 if all went fine with @p pointing to message, else
379  *	corresponding error.
380  */
381 int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
382 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
383 {
384 	int ret;
385 	struct scmi_xfer *xfer;
386 	struct scmi_info *info = handle_to_scmi_info(handle);
387 	struct scmi_xfers_info *minfo = &info->tx_minfo;
388 	struct device *dev = info->dev;
389 
390 	/* Ensure we have sane transfer sizes */
391 	if (rx_size > info->desc->max_msg_size ||
392 	    tx_size > info->desc->max_msg_size)
393 		return -ERANGE;
394 
395 	xfer = scmi_xfer_get(handle, minfo);
396 	if (IS_ERR(xfer)) {
397 		ret = PTR_ERR(xfer);
398 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
399 		return ret;
400 	}
401 
402 	xfer->tx.len = tx_size;
403 	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
404 	xfer->hdr.id = msg_id;
405 	xfer->hdr.protocol_id = prot_id;
406 	xfer->hdr.poll_completion = false;
407 
408 	*p = xfer;
409 
410 	return 0;
411 }
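
/*
 * Illustrative sketch, not compiled into the driver: the usual command
 * round trip built on scmi_xfer_get_init(), here with a 4-byte transmit
 * payload (scmi_version_get() below shows the in-tree, zero-payload
 * variant). All identifiers and the payload layout are hypothetical.
 */
#if 0
static int scmi_example_get_attr(const struct scmi_handle *handle,
				 u32 domain, u32 *attr)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, 0x4 /* hypothetical msg id */,
				 0x15 /* hypothetical protocol id */,
				 sizeof(__le32), sizeof(__le32), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*attr = le32_to_cpu(*(__le32 *)t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}
#endif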
412 
413 /**
414  * scmi_version_get() - command to get the revision of the SCMI entity
415  *
416  * @handle: Pointer to SCMI entity handle
417  * @protocol: Protocol identifier for the message
418  * @version: Holds returned version of protocol.
419  *
420  * Updates the SCMI information in the internal data structure.
421  *
422  * Return: 0 if all went fine, else return appropriate error.
423  */
424 int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
425 		     u32 *version)
426 {
427 	int ret;
428 	__le32 *rev_info;
429 	struct scmi_xfer *t;
430 
431 	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
432 				 sizeof(*version), &t);
433 	if (ret)
434 		return ret;
435 
436 	ret = scmi_do_xfer(handle, t);
437 	if (!ret) {
438 		rev_info = t->rx.buf;
439 		*version = le32_to_cpu(*rev_info);
440 	}
441 
442 	scmi_xfer_put(handle, t);
443 	return ret;
444 }
445 
446 void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
447 				     u8 *prot_imp)
448 {
449 	struct scmi_info *info = handle_to_scmi_info(handle);
450 
451 	info->protocols_imp = prot_imp;
452 }
453 
454 static bool
455 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
456 {
457 	int i;
458 	struct scmi_info *info = handle_to_scmi_info(handle);
459 
460 	if (!info->protocols_imp)
461 		return false;
462 
463 	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
464 		if (info->protocols_imp[i] == prot_id)
465 			return true;
466 	return false;
467 }
468 
469 /**
470  * scmi_handle_get() - Get the SCMI handle for a device
471  *
472  * @dev: pointer to device for which we want SCMI handle
473  *
474  * NOTE: The function does not track individual clients of the framework;
475  * that is left to the caller of the SCMI protocol library. Every successful
476  * scmi_handle_get() must be balanced by a corresponding scmi_handle_put().
477  *
478  * Return: pointer to handle if successful, NULL on error
479  */
480 struct scmi_handle *scmi_handle_get(struct device *dev)
481 {
482 	struct list_head *p;
483 	struct scmi_info *info;
484 	struct scmi_handle *handle = NULL;
485 
486 	mutex_lock(&scmi_list_mutex);
487 	list_for_each(p, &scmi_list) {
488 		info = list_entry(p, struct scmi_info, node);
489 		if (dev->parent == info->dev) {
490 			handle = &info->handle;
491 			info->users++;
492 			break;
493 		}
494 	}
495 	mutex_unlock(&scmi_list_mutex);
496 
497 	return handle;
498 }
499 
500 /**
501  * scmi_handle_put() - Release the handle acquired by scmi_handle_get
502  *
503  * @handle: handle acquired by scmi_handle_get
504  *
505  * NOTE: The function does not track individual clients of the framework;
506  * that is left to the caller of the SCMI protocol library. Every successful
507  * scmi_handle_get() must be balanced by a corresponding scmi_handle_put().
508  *
509  * Return: 0 if the handle was successfully released,
510  *	-EINVAL if NULL was passed.
511  */
512 int scmi_handle_put(const struct scmi_handle *handle)
513 {
514 	struct scmi_info *info;
515 
516 	if (!handle)
517 		return -EINVAL;
518 
519 	info = handle_to_scmi_info(handle);
520 	mutex_lock(&scmi_list_mutex);
521 	if (!WARN_ON(!info->users))
522 		info->users--;
523 	mutex_unlock(&scmi_list_mutex);
524 
525 	return 0;
526 }
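
/*
 * Illustrative sketch, not compiled into the driver: pairing the two
 * calls for a child device whose parent is the SCMI instance, roughly
 * what the SCMI bus glue does when binding a protocol device. The
 * function name is hypothetical.
 */
#if 0
static int scmi_example_bind(struct device *child)
{
	struct scmi_handle *handle = scmi_handle_get(child);

	if (!handle)
		return -EPROBE_DEFER;	/* SCMI instance not registered yet */

	/* ... use handle->version, protocol operations, etc. ... */

	return scmi_handle_put(handle);
}
#endif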
527 
528 static int scmi_xfer_info_init(struct scmi_info *sinfo)
529 {
530 	int i;
531 	struct scmi_xfer *xfer;
532 	struct device *dev = sinfo->dev;
533 	const struct scmi_desc *desc = sinfo->desc;
534 	struct scmi_xfers_info *info = &sinfo->tx_minfo;
535 
536 	/* Pre-allocated messages, no more than what hdr.seq can support */
537 	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
538 		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
539 			desc->max_msg, MSG_TOKEN_MAX);
540 		return -EINVAL;
541 	}
542 
543 	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
544 					sizeof(*info->xfer_block), GFP_KERNEL);
545 	if (!info->xfer_block)
546 		return -ENOMEM;
547 
548 	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
549 					      sizeof(long), GFP_KERNEL);
550 	if (!info->xfer_alloc_table)
551 		return -ENOMEM;
552 
553 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
554 	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
555 		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
556 					    GFP_KERNEL);
557 		if (!xfer->rx.buf)
558 			return -ENOMEM;
559 
560 		xfer->tx.buf = xfer->rx.buf;
561 		init_completion(&xfer->done);
562 	}
563 
564 	spin_lock_init(&info->xfer_lock);
565 
566 	return 0;
567 }
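
/*
 * Sizing note (illustrative values, not taken from any particular
 * transport descriptor): with desc->max_msg = 20 and
 * desc->max_msg_size = 128 the above preallocates 20 scmi_xfer
 * descriptors, one 20-bit allocation bitmap rounded up to a single
 * unsigned long, and 20 buffers of 128 bytes each that are shared
 * between the Tx and Rx directions of a transfer.
 */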
568 
569 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
570 			   int prot_id, bool tx)
571 {
572 	int ret, idx;
573 	struct scmi_chan_info *cinfo;
574 	struct idr *idr;
575 
576 	/* Transmit channel is first entry i.e. index 0 */
577 	idx = tx ? 0 : 1;
578 	idr = tx ? &info->tx_idr : &info->rx_idr;
579 
580 	/* Check if already allocated, used for multiple devices per protocol */
581 	cinfo = idr_find(idr, prot_id);
582 	if (cinfo)
583 		return 0;
584 
585 	if (!info->desc->ops->chan_available(dev, idx)) {
586 		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
587 		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
588 			return -EINVAL;
589 		goto idr_alloc;
590 	}
591 
592 	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
593 	if (!cinfo)
594 		return -ENOMEM;
595 
596 	cinfo->dev = dev;
597 
598 	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
599 	if (ret)
600 		return ret;
601 
602 idr_alloc:
603 	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
604 	if (ret != prot_id) {
605 		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
606 		return ret;
607 	}
608 
609 	cinfo->handle = &info->handle;
610 	return 0;
611 }
612 
613 static inline int
614 scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
615 {
616 	int ret = scmi_chan_setup(info, dev, prot_id, true);
617 
618 	if (!ret) /* Rx is optional, hence no error check */
619 		scmi_chan_setup(info, dev, prot_id, false);
620 
621 	return ret;
622 }
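
/*
 * Illustrative sketch, not compiled into the driver: the shape of what a
 * transport supplies for the channel setup above, assuming the
 * scmi_transport_ops type declared in common.h. Only the callbacks and
 * descriptor fields referenced by this file are shown; the callback
 * implementations are assumed to exist elsewhere and the limits are
 * illustrative values, not those of the in-tree mailbox transport.
 */
#if 0
static const struct scmi_transport_ops example_transport_ops = {
	.chan_available	= example_chan_available,
	.chan_setup	= example_chan_setup,
	.chan_free	= example_chan_free,
	.send_message	= example_send_message,
	.mark_txdone	= example_mark_txdone,
	.fetch_response	= example_fetch_response,
	.poll_done	= example_poll_done,
};

static const struct scmi_desc example_desc = {
	.ops			= &example_transport_ops,
	.max_rx_timeout_ms	= 30,
	.max_msg		= 20,
	.max_msg_size		= 128,
};
#endif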
623 
624 static inline void
625 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
626 			    int prot_id, const char *name)
627 {
628 	struct scmi_device *sdev;
629 
630 	sdev = scmi_device_create(np, info->dev, prot_id, name);
631 	if (!sdev) {
632 		dev_err(info->dev, "failed to create %d protocol device\n",
633 			prot_id);
634 		return;
635 	}
636 
637 	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
638 		dev_err(&sdev->dev, "failed to setup transport\n");
639 		scmi_device_destroy(sdev);
640 		return;
641 	}
642 
643 	/* setup handle now as the transport is ready */
644 	scmi_set_handle(sdev);
645 }
646 
647 #define MAX_SCMI_DEV_PER_PROTOCOL	2
648 struct scmi_prot_devnames {
649 	int protocol_id;
650 	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
651 };
652 
653 static struct scmi_prot_devnames devnames[] = {
654 	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
655 	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
656 	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
657 	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
658 	{ SCMI_PROTOCOL_RESET,  { "reset" },},
659 };
660 
661 static inline void
662 scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
663 			     int prot_id)
664 {
665 	int loop, cnt;
666 
667 	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
668 		if (devnames[loop].protocol_id != prot_id)
669 			continue;
670 
671 		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
672 			const char *name = devnames[loop].names[cnt];
673 
674 			if (name)
675 				scmi_create_protocol_device(np, info, prot_id,
676 							    name);
677 		}
678 	}
679 }
680 
681 static int scmi_probe(struct platform_device *pdev)
682 {
683 	int ret;
684 	struct scmi_handle *handle;
685 	const struct scmi_desc *desc;
686 	struct scmi_info *info;
687 	struct device *dev = &pdev->dev;
688 	struct device_node *child, *np = dev->of_node;
689 
690 	desc = of_device_get_match_data(dev);
691 	if (!desc)
692 		return -EINVAL;
693 
694 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
695 	if (!info)
696 		return -ENOMEM;
697 
698 	info->dev = dev;
699 	info->desc = desc;
700 	INIT_LIST_HEAD(&info->node);
701 
702 	ret = scmi_xfer_info_init(info);
703 	if (ret)
704 		return ret;
705 
706 	platform_set_drvdata(pdev, info);
707 	idr_init(&info->tx_idr);
708 	idr_init(&info->rx_idr);
709 
710 	handle = &info->handle;
711 	handle->dev = info->dev;
712 	handle->version = &info->version;
713 
714 	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
715 	if (ret)
716 		return ret;
717 
718 	ret = scmi_base_protocol_init(handle);
719 	if (ret) {
720 		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
721 		return ret;
722 	}
723 
724 	mutex_lock(&scmi_list_mutex);
725 	list_add_tail(&info->node, &scmi_list);
726 	mutex_unlock(&scmi_list_mutex);
727 
728 	for_each_available_child_of_node(np, child) {
729 		u32 prot_id;
730 
731 		if (of_property_read_u32(child, "reg", &prot_id))
732 			continue;
733 
734 		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
735 			dev_err(dev, "Out of range protocol %d\n", prot_id);
736 
737 		if (!scmi_is_protocol_implemented(handle, prot_id)) {
738 			dev_err(dev, "SCMI protocol %d not implemented\n",
739 				prot_id);
740 			continue;
741 		}
742 
743 		scmi_create_protocol_devices(child, info, prot_id);
744 	}
745 
746 	return 0;
747 }
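
/*
 * Illustrative devicetree fragment for the probe above (a hedged sketch
 * based on the arm,scmi binding; the mailbox/shmem transport properties
 * and the protocol-specific cells are omitted). Each child node carries
 * its SCMI protocol identifier in "reg", which is what the
 * for_each_available_child_of_node() loop reads back:
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_devpd: protocol@11 {
 *				reg = <0x11>;	// power domain protocol
 *			};
 *
 *			scmi_dvfs: protocol@13 {
 *				reg = <0x13>;	// performance protocol
 *			};
 *		};
 *	};
 */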
748 
749 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
750 {
751 	idr_remove(idr, id);
752 }
753 
754 static int scmi_remove(struct platform_device *pdev)
755 {
756 	int ret = 0;
757 	struct scmi_info *info = platform_get_drvdata(pdev);
758 	struct idr *idr = &info->tx_idr;
759 
760 	mutex_lock(&scmi_list_mutex);
761 	if (info->users)
762 		ret = -EBUSY;
763 	else
764 		list_del(&info->node);
765 	mutex_unlock(&scmi_list_mutex);
766 
767 	if (ret)
768 		return ret;
769 
770 	/* Safe to free channels since no more users */
771 	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
772 	idr_destroy(&info->tx_idr);
773 
774 	idr = &info->rx_idr;
775 	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
776 	idr_destroy(&info->rx_idr);
777 
778 	return ret;
779 }
780 
781 static ssize_t protocol_version_show(struct device *dev,
782 				     struct device_attribute *attr, char *buf)
783 {
784 	struct scmi_info *info = dev_get_drvdata(dev);
785 
786 	return sprintf(buf, "%u.%u\n", info->version.major_ver,
787 		       info->version.minor_ver);
788 }
789 static DEVICE_ATTR_RO(protocol_version);
790 
791 static ssize_t firmware_version_show(struct device *dev,
792 				     struct device_attribute *attr, char *buf)
793 {
794 	struct scmi_info *info = dev_get_drvdata(dev);
795 
796 	return sprintf(buf, "0x%x\n", info->version.impl_ver);
797 }
798 static DEVICE_ATTR_RO(firmware_version);
799 
800 static ssize_t vendor_id_show(struct device *dev,
801 			      struct device_attribute *attr, char *buf)
802 {
803 	struct scmi_info *info = dev_get_drvdata(dev);
804 
805 	return sprintf(buf, "%s\n", info->version.vendor_id);
806 }
807 static DEVICE_ATTR_RO(vendor_id);
808 
809 static ssize_t sub_vendor_id_show(struct device *dev,
810 				  struct device_attribute *attr, char *buf)
811 {
812 	struct scmi_info *info = dev_get_drvdata(dev);
813 
814 	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
815 }
816 static DEVICE_ATTR_RO(sub_vendor_id);
817 
818 static struct attribute *versions_attrs[] = {
819 	&dev_attr_firmware_version.attr,
820 	&dev_attr_protocol_version.attr,
821 	&dev_attr_vendor_id.attr,
822 	&dev_attr_sub_vendor_id.attr,
823 	NULL,
824 };
825 ATTRIBUTE_GROUPS(versions);
826 
827 /* Each compatible listed below must have a descriptor associated with it */
828 static const struct of_device_id scmi_of_match[] = {
829 	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
830 	{ /* Sentinel */ },
831 };
832 
833 MODULE_DEVICE_TABLE(of, scmi_of_match);
834 
835 static struct platform_driver scmi_driver = {
836 	.driver = {
837 		   .name = "arm-scmi",
838 		   .of_match_table = scmi_of_match,
839 		   .dev_groups = versions_groups,
840 		   },
841 	.probe = scmi_probe,
842 	.remove = scmi_remove,
843 };
844 
845 module_platform_driver(scmi_driver);
846 
847 MODULE_ALIAS("platform:arm-scmi");
848 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
849 MODULE_DESCRIPTION("ARM SCMI protocol driver");
850 MODULE_LICENSE("GPL v2");
851