xref: /linux/drivers/rpmsg/qcom_glink_smem.c (revision c894ec016c9d0418dd832202225a8c64f450d71e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, Linaro Ltd
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

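/*
 * Per-edge state: the child device registered for the glink edge, the
 * incoming interrupt, the outgoing mailbox (IPC) channel and the remote
 * processor id used for SMEM item lookups.
 */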
struct qcom_glink_smem {
	struct device dev;

	int irq;
	struct qcom_glink *glink;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	u32 remote_pid;
};

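/*
 * One FIFO of the SMEM transport: wraps the generic qcom_glink_pipe with
 * pointers to the shared head/tail indices and the FIFO data area.
 */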
struct glink_smem_pipe {
	struct qcom_glink_pipe native;

	__le32 *tail;
	__le32 *head;

	void *fifo;

	struct qcom_glink_smem *smem;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

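/*
 * Number of bytes currently readable in the RX FIFO. The RX FIFO item is
 * looked up lazily here, as it may not yet exist in SMEM when the edge is
 * registered.
 */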
static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	struct qcom_glink_smem *smem = pipe->smem;
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	if (!pipe->fifo) {
		fifo = qcom_smem_get(smem->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

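/*
 * Copy @count bytes out of the RX FIFO, starting @offset bytes past the
 * current tail, without consuming the data. Handles wrap-around of the
 * circular buffer.
 */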
static void glink_smem_rx_peek(struct qcom_glink_pipe *np,
			       void *data, unsigned int offset, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	len = min_t(size_t, count, pipe->native.length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, (count - len));
}

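/* Consume @count bytes by moving the shared RX tail index forward. */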
static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

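/*
 * Free space in the TX FIFO, less the FIFO_FULL_RESERVE and
 * TX_BLOCKED_CMD_RESERVE bytes that are always kept in reserve.
 */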
static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}

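/*
 * Copy one buffer into the TX FIFO at @head, wrapping around the end of
 * the FIFO if necessary, and return the new head position.
 */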
static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

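/*
 * Write a message (header followed by payload) into the TX FIFO and then
 * publish the new, 8-byte aligned head index. The write barrier ensures the
 * FIFO contents are visible to the remote before the head index is updated.
 */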
static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

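/* Signal the remote via the mailbox channel that new data has been queued. */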
static void glink_smem_tx_kick(struct qcom_glink_pipe *glink_pipe)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	struct qcom_glink_smem *smem = pipe->smem;

	mbox_send_message(smem->mbox_chan, NULL);
	mbox_client_txdone(smem->mbox_chan, 0);
}

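/* Incoming interrupt from the remote: hand RX processing to the glink core. */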
static irqreturn_t qcom_glink_smem_intr(int irq, void *data)
{
	struct qcom_glink_smem *smem = data;

	qcom_glink_native_rx(smem->glink);

	return IRQ_HANDLED;
}

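/* Release callback for the edge device; frees the context on the last put. */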
static void qcom_glink_smem_release(struct device *dev)
{
	struct qcom_glink_smem *smem = container_of(dev, struct qcom_glink_smem, dev);

	kfree(smem);
}

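/**
 * qcom_glink_smem_register() - register a SMEM based glink edge
 * @parent: parent device of the glink edge
 * @node: device_node describing the glink edge
 *
 * Registers a child device for the edge, then sets up the SMEM transport:
 * the shared descriptor and TX FIFO items are allocated and mapped, the
 * incoming interrupt and the outgoing mailbox channel are acquired, and the
 * resulting pipes are handed to qcom_glink_native_probe().
 *
 * Return: a qcom_glink_smem handle on success, an ERR_PTR() on failure.
 */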
struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
						 struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink_smem *smem;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	smem = kzalloc(sizeof(*smem), GFP_KERNEL);
	if (!smem)
		return ERR_PTR(-ENOMEM);

	dev = &smem->dev;

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	smem->remote_pid = remote_pid;

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

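	/*
	 * The first four __le32 words of the 32-byte descriptor item hold the
	 * shared FIFO indices: TX tail, TX head, RX tail and RX head.
	 */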
	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	smem->irq = of_irq_get(smem->dev.of_node, 0);
	ret = devm_request_irq(&smem->dev, smem->irq, qcom_glink_smem_intr,
			       IRQF_NO_SUSPEND | IRQF_NO_AUTOEN,
			       "glink-smem", smem);
	if (ret) {
		dev_err(&smem->dev, "failed to request IRQ\n");
		goto err_put_dev;
	}

	smem->mbox_client.dev = &smem->dev;
	smem->mbox_client.knows_txdone = true;
	smem->mbox_chan = mbox_request_channel(&smem->mbox_client, 0);
	if (IS_ERR(smem->mbox_chan)) {
		ret = dev_err_probe(&smem->dev, PTR_ERR(smem->mbox_chan),
				    "failed to acquire IPC channel\n");
		goto err_put_dev;
	}

	rx_pipe->smem = smem;
	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peek = glink_smem_rx_peek;
	rx_pipe->native.advance = glink_smem_rx_advance;

	tx_pipe->smem = smem;
	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->native.kick = glink_smem_tx_kick;

	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_free_mbox;
	}

	smem->glink = glink;

	enable_irq(smem->irq);

	return smem;

err_free_mbox:
	mbox_free_channel(smem->mbox_chan);

err_put_dev:
	device_unregister(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

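/**
 * qcom_glink_smem_unregister() - unregister a SMEM based glink edge
 * @smem: handle returned by qcom_glink_smem_register()
 *
 * Disables the incoming interrupt, tears down the native glink instance,
 * releases the mailbox channel and unregisters the edge device.
 */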
void qcom_glink_smem_unregister(struct qcom_glink_smem *smem)
{
	struct qcom_glink *glink = smem->glink;

	disable_irq(smem->irq);

	qcom_glink_native_remove(glink);

	mbox_free_channel(smem->mbox_chan);
	device_unregister(&smem->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");