// SPDX-License-Identifier: GPL-2.0
/*
 * For transports using a shared memory based structure.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */

#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/processor.h>
#include <linux/types.h>

#include <linux/bug.h>

#include "common.h"

#define SCMI_SHMEM_LAYOUT_OVERHEAD	24

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED		BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
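
/*
 * Illustrative layout check, not part of the original driver: the seven
 * __le32 control words above place msg_payload at byte offset 28.
 */
static_assert(offsetof(struct scmi_shared_mem, msg_payload) == 28);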

static inline void shmem_memcpy_fromio32(void *to,
					 const void __iomem *from,
					 size_t count)
{
	WARN_ON(!IS_ALIGNED((unsigned long)from, 4) ||
		!IS_ALIGNED((unsigned long)to, 4) ||
		count % 4);

	__ioread32_copy(to, from, count / 4);
}

static inline void shmem_memcpy_toio32(void __iomem *to,
				       const void *from,
				       size_t count)
{
	WARN_ON(!IS_ALIGNED((unsigned long)to, 4) ||
		!IS_ALIGNED((unsigned long)from, 4) ||
		count % 4);

	__iowrite32_copy(to, from, count / 4);
}

static struct scmi_shmem_io_ops shmem_io_ops32 = {
	.fromio = shmem_memcpy_fromio32,
	.toio = shmem_memcpy_toio32,
};
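
/*
 * Note (added for clarity): these 32-bit-only accessors are selected by
 * shmem_setup_iomap() below when the shmem node carries a
 * reg-io-width = <4> property; otherwise the plain memcpy_{from,to}io
 * wrappers that follow are used.
 */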

/*
 * Wrappers are needed for proper memcpy_{from,to}_io expansion by the
 * pre-processor.
 */
static inline void shmem_memcpy_fromio(void *to,
				       const void __iomem *from,
				       size_t count)
{
	memcpy_fromio(to, from, count);
}

static inline void shmem_memcpy_toio(void __iomem *to,
				     const void *from,
				     size_t count)
{
	memcpy_toio(to, from, count);
}

static struct scmi_shmem_io_ops shmem_io_ops_default = {
	.fromio = shmem_memcpy_fromio,
	.toio = shmem_memcpy_toio,
};

static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
			     struct scmi_xfer *xfer,
			     struct scmi_chan_info *cinfo,
			     shmem_copy_toio_t copy_toio)
{
	ktime_t stop;

	/*
	 * Ideally the channel must be free by now, unless the OS timed out
	 * the last request while the platform continued to process it: wait
	 * until the platform releases the shared memory, otherwise we may
	 * end up overwriting its response with a new message payload or
	 * vice versa. Give up after twice the expected channel timeout so
	 * as not to bail out on intermittent issues where the platform is
	 * occasionally a bit slower to answer.
	 *
	 * Note that after a timeout is detected we bail out and carry on but
	 * the transport functionality is probably permanently compromised:
	 * this is just to ease debugging and avoid complete hangs on boot
	 * due to a misbehaving SCMI firmware.
	 */
	stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			ktime_after(ktime_get(), stop));
	if (!(ioread32(&shmem->channel_status) &
	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
		WARN_ON_ONCE(1);
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel!\n");
		return;
	}

	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
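
/*
 * For example (illustrative, not from the original source): transmitting
 * a message with an 8-byte payload leaves the area as channel_status = 0
 * (busy), length = 12 (the 4-byte msg_header plus 8 payload bytes),
 * msg_header = the packed SCMI header and msg_payload[0..7] = the
 * message arguments.
 */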

static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->msg_header);
}

static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
				 struct scmi_xfer *xfer,
				 shmem_copy_fromio_t copy_fromio)
{
	size_t len = ioread32(&shmem->length);

	xfer->hdr.status = ioread32(shmem->msg_payload);
	/* Skip the length of header and status in shmem area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);

	/* Take a copy to the rx buffer. */
	copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
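
/*
 * Worked example (added for clarity): a response with length = 16
 * carries the 4-byte msg_header, the 4-byte status word and 8 bytes of
 * return values, so rx.len becomes min(rx.len, 8) and the copy starts
 * at msg_payload + 4, right past the status word.
 */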

static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
				     size_t max_len, struct scmi_xfer *xfer,
				     shmem_copy_fromio_t copy_fromio)
{
	size_t len = ioread32(&shmem->length);

	/* Skip only the length of header in shmem area i.e. 4 bytes */
	xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);

	/* Take a copy to the rx buffer. */
	copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}

static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}

static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
			    struct scmi_xfer *xfer)
{
	u16 xfer_id;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&shmem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
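
/*
 * Note (added for clarity): polling is considered complete only once the
 * platform has marked the channel FREE (or ERROR), and the token check
 * above guards against mistaking a stale or later reply for the one this
 * xfer is waiting on.
 */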

static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
{
	return (ioread32(&shmem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
{
	return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
}

static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
				       struct device *dev, bool tx,
				       struct resource *res,
				       struct scmi_shmem_io_ops **ops)
{
	struct device_node *shmem __free(device_node);
	const char *desc = tx ? "Tx" : "Rx";
	int ret, idx = tx ? 0 : 1;
	struct device *cdev = cinfo->dev;
	struct resource lres = {};
	resource_size_t size;
	void __iomem *addr;
	u32 reg_io_width = 0;

	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
	if (!shmem)
		return IOMEM_ERR_PTR(-ENODEV);

	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
		return IOMEM_ERR_PTR(-ENXIO);

	/* Use a local on-stack resource as a working area when not provided */
	if (!res)
		res = &lres;

	ret = of_address_to_resource(shmem, 0, res);
	if (ret) {
		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
		return IOMEM_ERR_PTR(ret);
	}

	size = resource_size(res);
	if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) {
		dev_err(dev, "misconfigured SCMI shared memory\n");
		return IOMEM_ERR_PTR(-ENOSPC);
	}

	addr = devm_ioremap(dev, res->start, size);
	if (!addr) {
		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
		return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
	}

	of_property_read_u32(shmem, "reg-io-width", &reg_io_width);
	switch (reg_io_width) {
	case 4:
		*ops = &shmem_io_ops32;
		break;
	default:
		*ops = &shmem_io_ops_default;
		break;
	}

	return addr;
}
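
/*
 * Illustrative devicetree fragment (not from the original file) matching
 * the lookup above: the transport channel references an "arm,scmi-shmem"
 * area through a "shmem" phandle, and an optional reg-io-width = <4>
 * selects the 32-bit-only accessors.
 *
 *	sram@50000000 {
 *		compatible = "mmio-sram";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		cpu_scp_hpri: scp-sram-section@0 {
 *			compatible = "arm,scmi-shmem";
 *			reg = <0x0 0x100>;
 *			reg-io-width = <4>;
 *		};
 *	};
 */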

static const struct scmi_shared_mem_operations scmi_shmem_ops = {
	.tx_prepare = shmem_tx_prepare,
	.read_header = shmem_read_header,
	.fetch_response = shmem_fetch_response,
	.fetch_notification = shmem_fetch_notification,
	.clear_channel = shmem_clear_channel,
	.poll_done = shmem_poll_done,
	.channel_free = shmem_channel_free,
	.channel_intr_enabled = shmem_channel_intr_enabled,
	.setup_iomap = shmem_setup_iomap,
};

const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void)
{
	return &scmi_shmem_ops;
}