// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message SMC/HVC
 * Transport driver
 *
 * Copyright 2020 NXP
 */

#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/limits.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "../common.h"

/*
 * The shmem address is split into a 4K page and an offset.
 * This is done to make sure the parameters fit in the 32-bit arguments of
 * the smc/hvc call, keeping it uniform across the smc32/smc64 conventions.
 * This, however, limits the shmem address to 44 bits.
 *
 * These optional parameters can be used to distinguish among multiple
 * scmi instances that are using the same smc-id.
 * The page parameter is passed in the r1/x1/w1 register and the offset
 * parameter is passed in the r2/x2/w2 register.
 */
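/*
 * For reference, a simplified, illustrative devicetree fragment for such a
 * channel (the property values below are hypothetical):
 *
 *	scmi {
 *		compatible = "arm,scmi-smc-param";
 *		arm,smc-id = <0x82000010>;
 *		shmem = <&scmi_shmem>;
 *	};
 *
 * "arm,smc-id" provides the SMC/HVC function id used to ring the doorbell
 * and "shmem" points at the shared memory channel whose address is split
 * as described above.
 */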

#define SHMEM_SIZE (SZ_4K)
#define SHMEM_SHIFT 12
#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
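/*
 * Example (hypothetical address): a shmem channel at physical address
 * 0x880fe000 is advertised as SHMEM_PAGE(0x880fe000) == 0x880fe in r1/x1/w1
 * and SHMEM_OFFSET(0x880fe000) == 0x0 in r2/x2/w2.
 */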

/**
 * struct scmi_smc - Structure representing a SCMI smc transport
 *
 * @irq: An optional IRQ for completion
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @io_ops: Transport specific I/O operations
 * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
 *		Used when NOT operating in atomic mode.
 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
 *	      Used when operating in atomic mode.
 * @func_id: smc/hvc call function id
 * @param_page: 4K page number of the shmem channel
 * @param_offset: Offset within the 4K page of the shmem channel
 * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual
 *	    platforms
 */

struct scmi_smc {
	int irq;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	struct scmi_shmem_io_ops *io_ops;
	/* Protect access to shmem area */
	struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
	atomic_t inflight;
	unsigned long func_id;
	unsigned long param_page;
	unsigned long param_offset;
	unsigned long cap_id;
};

static struct scmi_transport_core_operations *core;

static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
	struct scmi_smc *scmi_info = data;

	core->rx_callback(scmi_info->cinfo,
			  core->shmem->read_header(scmi_info->shmem), NULL);

	return IRQ_HANDLED;
}

static bool smc_chan_available(struct device_node *of_node, int idx)
{
	struct device_node *np __free(device_node) =
					of_parse_phandle(of_node, "shmem", 0);
	if (!np)
		return false;

	return true;
}

static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_init(&scmi_info->shmem_lock);
}

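/*
 * In atomic mode (CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE) the Tx
 * channel ownership is tracked with the @inflight atomic instead of a
 * sleeping mutex: a transfer claims the channel by atomically replacing
 * INFLIGHT_NONE with its own sequence number, and the channel is released
 * by writing INFLIGHT_NONE back in smc_channel_lock_release().
 */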
static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
	int ret;

	ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);

	return ret == INFLIGHT_NONE;
}

static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
			 struct scmi_xfer *xfer __maybe_unused)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
	else
		mutex_lock(&scmi_info->shmem_lock);
}

static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_unlock(&scmi_info->shmem_lock);
}

static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx)
{
	struct device *cdev = cinfo->dev;
	unsigned long cap_id = ULONG_MAX;
	struct scmi_smc *scmi_info;
	struct resource res = {};
	u32 func_id;
	int ret;

	if (!tx)
		return -ENODEV;

	scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
	if (!scmi_info)
		return -ENOMEM;

	scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res,
						    &scmi_info->io_ops);
	if (IS_ERR(scmi_info->shmem))
		return PTR_ERR(scmi_info->shmem);

	ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
	if (ret < 0)
		return ret;

	if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) {
		resource_size_t size = resource_size(&res);
		void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8;
		/* The capability-id is kept in the last 8 bytes of shmem.
		 *	+-------+ <-- 0
		 *	| shmem |
		 *	+-------+ <-- size - 8
		 *	| capId |
		 *	+-------+ <-- size
		 */
		memcpy_fromio(&cap_id, ptr, sizeof(cap_id));
	}

	if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
		scmi_info->param_page = SHMEM_PAGE(res.start);
		scmi_info->param_offset = SHMEM_OFFSET(res.start);
	}
	/*
	 * If there is an interrupt named "a2p", then the service and
	 * completion of a message is signaled by an interrupt rather than by
	 * the return of the SMC call.
	 */
	scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
	if (scmi_info->irq > 0) {
		ret = request_irq(scmi_info->irq, smc_msg_done_isr,
				  IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
		if (ret) {
			dev_err(dev, "failed to setup SCMI smc irq\n");
			return ret;
		}
	} else {
		cinfo->no_completion_irq = true;
	}

	scmi_info->func_id = func_id;
	scmi_info->cap_id = cap_id;
	scmi_info->cinfo = cinfo;
	smc_channel_lock_init(scmi_info);
	cinfo->transport_info = scmi_info;

	return 0;
}

static int smc_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_smc *scmi_info = cinfo->transport_info;

	/*
	 * Different protocols might share the same chan info, so a previous
	 * smc_chan_free call might have already freed the structure.
	 */
	if (!scmi_info)
		return 0;

	/* Ignore any possible further reception on the IRQ path */
	if (scmi_info->irq > 0)
		free_irq(scmi_info->irq, scmi_info);

	cinfo->transport_info = NULL;
	scmi_info->cinfo = NULL;

	return 0;
}

static int smc_send_message(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;
	struct arm_smccc_res res;

	/*
	 * The channel will be released only once the response has been
	 * fully retrieved, i.e. after .mark_txdone().
	 */
	smc_channel_lock_acquire(scmi_info, xfer);

	core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo,
				scmi_info->io_ops->toio);

	if (scmi_info->cap_id != ULONG_MAX)
		arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
				     0, 0, 0, 0, 0, &res);
	else
		arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page,
				     scmi_info->param_offset, 0, 0, 0, 0, 0,
				     &res);

	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
	if (res.a0) {
		smc_channel_lock_release(scmi_info);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void smc_fetch_response(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	core->shmem->fetch_response(scmi_info->shmem, xfer,
				    scmi_info->io_ops->fromio);
}

static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *__unused)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	smc_channel_lock_release(scmi_info);
}

static const struct scmi_transport_ops scmi_smc_ops = {
	.chan_available = smc_chan_available,
	.chan_setup = smc_chan_setup,
	.chan_free = smc_chan_free,
	.send_message = smc_send_message,
	.mark_txdone = smc_mark_txdone,
	.fetch_response = smc_fetch_response,
};

static struct scmi_desc scmi_smc_desc = {
	.ops = &scmi_smc_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
	/*
	 * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
	 * once the SMC instruction has completed successfully, the issued
	 * SCMI command will have been fully processed by the SCMI platform
	 * firmware, so any response expected for the issued command is
	 * immediately ready to be fetched from the shared memory area.
	 */
	.sync_cmds_completed_on_ret = true,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi-smc" },
	{ .compatible = "arm,scmi-smc-param" },
	{ .compatible = "qcom,scmi-smc" },
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
			     scmi_of_match, core);
module_platform_driver(scmi_smc_driver);

MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
MODULE_AUTHOR("Nikunj Kela <quic_nkela@quicinc.com>");
MODULE_DESCRIPTION("SCMI SMC Transport driver");
MODULE_LICENSE("GPL");