xref: /linux/drivers/firmware/arm_scmi/shmem.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * For transport using shared mem structure.
4  *
5  * Copyright (C) 2019-2024 ARM Ltd.
6  */
7 
8 #include <linux/ktime.h>
9 #include <linux/io.h>
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/processor.h>
13 #include <linux/types.h>
14 
15 #include <linux/bug.h>
16 
17 #include "common.h"
18 
19 /*
20  * SCMI specification requires all parameters, message headers, return
21  * arguments or any protocol data to be expressed in little endian
22  * format only.
23  */
struct scmi_shared_mem {
	__le32 reserved;
	/* Channel ownership/error bits, polled by both agent and platform */
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	/* Channel flags; INTR_ENABLED is cleared when polling for completion */
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	/* Byte length of msg_header plus msg_payload */
	__le32 length;
	/* Packed SCMI message header (see pack_scmi_header()) */
	__le32 msg_header;
	/* Variable-length message parameters / return values */
	u8 msg_payload[];
};
36 
/*
 * Prepare the shared memory area to transmit @xfer: wait for the platform
 * to release the channel, then mark it busy and lay out flags, length,
 * header and payload for the outgoing message.
 */
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
			     struct scmi_xfer *xfer,
			     struct scmi_chan_info *cinfo)
{
	ktime_t deadline;

	/*
	 * The channel should normally already be free; it may still be busy
	 * if the OS timed out a previous request while the platform kept
	 * processing it. Wait for the platform to release the shared memory
	 * so we do not overwrite its pending response (or vice-versa).
	 * Allow up to twice the expected channel timeout so intermittent
	 * slow answers do not cause a spurious bail-out.
	 *
	 * Once a timeout is detected we give up and carry on, but the
	 * transport is most likely permanently compromised at that point:
	 * this only eases debugging and avoids complete boot hangs caused
	 * by a misbehaving SCMI firmware.
	 */
	deadline = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			 ktime_after(ktime_get(), deadline));
	if (WARN_ON_ONCE(!(ioread32(&shmem->channel_status) &
			   SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE))) {
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel !\n");
		return;
	}

	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
78 
shmem_read_header(struct scmi_shared_mem __iomem * shmem)79 static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
80 {
81 	return ioread32(&shmem->msg_header);
82 }
83 
/*
 * Copy a synchronous command response out of the shared memory area.
 *
 * The platform's reply starts with the message header followed by a
 * 32-bit status word; only the bytes after those two fields are payload.
 * The status is extracted into xfer->hdr.status and the remaining bytes
 * are copied into xfer->rx.buf, clamped to the caller-provided rx.len.
 */
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
				 struct scmi_xfer *xfer)
{
	size_t len = ioread32(&shmem->length);
	/* Bytes preceding the payload: message header + status word */
	const size_t prefix = sizeof(shmem->msg_header) + sizeof(__le32);

	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area */
	xfer->rx.len = min_t(size_t, xfer->rx.len,
			     len > prefix ? len - prefix : 0);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + sizeof(__le32),
		      xfer->rx.len);
}
96 
/*
 * Copy a notification message out of the shared memory area.
 *
 * Notifications carry no status word: only the message header precedes
 * the payload. The payload is copied into xfer->rx.buf, clamped to
 * @max_len.
 */
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
				     size_t max_len, struct scmi_xfer *xfer)
{
	size_t len = ioread32(&shmem->length);

	/* Skip only the length of the header in the shmem area */
	xfer->rx.len = min_t(size_t, max_len,
			     len > sizeof(shmem->msg_header) ?
			     len - sizeof(shmem->msg_header) : 0);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
108 
/* Hand the channel back to the platform by marking it free again. */
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
113 
shmem_poll_done(struct scmi_shared_mem __iomem * shmem,struct scmi_xfer * xfer)114 static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
115 			    struct scmi_xfer *xfer)
116 {
117 	u16 xfer_id;
118 
119 	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
120 
121 	if (xfer->hdr.seq != xfer_id)
122 		return false;
123 
124 	return ioread32(&shmem->channel_status) &
125 		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
126 		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
127 }
128 
shmem_channel_free(struct scmi_shared_mem __iomem * shmem)129 static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
130 {
131 	return (ioread32(&shmem->channel_status) &
132 			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
133 }
134 
shmem_channel_intr_enabled(struct scmi_shared_mem __iomem * shmem)135 static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
136 {
137 	return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
138 }
139 
shmem_setup_iomap(struct scmi_chan_info * cinfo,struct device * dev,bool tx,struct resource * res)140 static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
141 				       struct device *dev, bool tx,
142 				       struct resource *res)
143 {
144 	struct device_node *shmem __free(device_node);
145 	const char *desc = tx ? "Tx" : "Rx";
146 	int ret, idx = tx ? 0 : 1;
147 	struct device *cdev = cinfo->dev;
148 	struct resource lres = {};
149 	resource_size_t size;
150 	void __iomem *addr;
151 
152 	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
153 	if (!shmem)
154 		return IOMEM_ERR_PTR(-ENODEV);
155 
156 	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
157 		return IOMEM_ERR_PTR(-ENXIO);
158 
159 	/* Use a local on-stack as a working area when not provided */
160 	if (!res)
161 		res = &lres;
162 
163 	ret = of_address_to_resource(shmem, 0, res);
164 	if (ret) {
165 		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
166 		return IOMEM_ERR_PTR(ret);
167 	}
168 
169 	size = resource_size(res);
170 	addr = devm_ioremap(dev, res->start, size);
171 	if (!addr) {
172 		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
173 		return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
174 	}
175 
176 	return addr;
177 }
178 
/* Shared-memory transport operations handed out to the SCMI core */
static const struct scmi_shared_mem_operations scmi_shmem_ops = {
	.tx_prepare = shmem_tx_prepare,
	.read_header = shmem_read_header,
	.fetch_response = shmem_fetch_response,
	.fetch_notification = shmem_fetch_notification,
	.clear_channel = shmem_clear_channel,
	.poll_done = shmem_poll_done,
	.channel_free = shmem_channel_free,
	.channel_intr_enabled = shmem_channel_intr_enabled,
	.setup_iomap = shmem_setup_iomap,
};
190 
scmi_shared_mem_operations_get(void)191 const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void)
192 {
193 	return &scmi_shmem_ops;
194 }
195