xref: /linux/drivers/soc/ti/wkup_m3_ipc.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
/*
 * AMx3 Wkup M3 IPC driver
 *
 * Copyright (C) 2015 Texas Instruments, Inc.
 *
 * Dave Gerlach <d-gerlach@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
#include <linux/wkup_m3_ipc.h>

#define AM33XX_CTRL_IPC_REG_COUNT	0x8
#define AM33XX_CTRL_IPC_REG_OFFSET(m)	(0x4 + 4 * (m))
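/*
 * The mapped region exposes the TXEV_EOI register at offset 0x0 followed by
 * the eight CTRL module IPC message registers, so register m sits at offset
 * 0x4 + 4 * m.  As used below, register 0 carries the resume address,
 * register 1 the power-state command (and, after resume, the CM3 status
 * response), register 2 the CM3 firmware version and register 4 the memory
 * type; the remaining registers are written with DS_IPC_DEFAULT before a
 * low-power transition.
 */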

/* AM33XX M3_TXEV_EOI register */
#define AM33XX_CONTROL_M3_TXEV_EOI	0x00

#define AM33XX_M3_TXEV_ACK		(0x1 << 0)
#define AM33XX_M3_TXEV_ENABLE		(0x0 << 0)

#define IPC_CMD_DS0			0x4
#define IPC_CMD_STANDBY			0xc
#define IPC_CMD_IDLE			0x10
#define IPC_CMD_RESET			0xe
#define DS_IPC_DEFAULT			0xffffffff
#define M3_VERSION_UNKNOWN		0x0000ffff
#define M3_BASELINE_VERSION		0x191
#define M3_STATUS_RESP_MASK		(0xffff << 16)
#define M3_FW_VERSION_MASK		0xffff

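/*
 * Handshake state with the CM3, advanced from the TXEV interrupt handler:
 * the CM3 starts out in RESET, becomes INITED once it has reported its
 * firmware version, and transiently sits in MSG_FOR_LP or MSG_FOR_RESET
 * while the MPU waits for it to acknowledge a low-power or reset command.
 */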
#define M3_STATE_UNKNOWN		0
#define M3_STATE_RESET			1
#define M3_STATE_INITED			2
#define M3_STATE_MSG_FOR_LP		3
#define M3_STATE_MSG_FOR_RESET		4

static struct wkup_m3_ipc *m3_ipc_state;

static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ACK,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ENABLE,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
				   u32 val, int ipc_reg_num)
{
	/* Valid registers are 0 .. AM33XX_CTRL_IPC_REG_COUNT - 1 */
	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return;

	writel(val, m3_ipc->ipc_mem_base +
	       AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
					  int ipc_reg_num)
{
	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return 0;

	return readl(m3_ipc->ipc_mem_base +
		     AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
{
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);

	return val & M3_FW_VERSION_MASK;
}

static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
{
	struct wkup_m3_ipc *m3_ipc = ipc_data;
	struct device *dev = m3_ipc->dev;
	int ver = 0;

	am33xx_txev_eoi(m3_ipc);

	switch (m3_ipc->state) {
	case M3_STATE_RESET:
		ver = wkup_m3_fw_version_read(m3_ipc);

		if (ver == M3_VERSION_UNKNOWN ||
		    ver < M3_BASELINE_VERSION) {
			dev_warn(dev, "CM3 Firmware Version %x not supported\n",
				 ver);
		} else {
			dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
		}

		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_RESET:
		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_LP:
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_UNKNOWN:
		dev_warn(dev, "Unknown CM3 State\n");
	}

	am33xx_txev_enable(m3_ipc);

	return IRQ_HANDLED;
}

static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	mbox_msg_t dummy_msg = 0;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	/*
	 * Write a dummy message to the mailbox in order to trigger the RX
	 * interrupt to alert the M3 that data is available in the IPC
	 * registers. We must enable the IRQ here and disable it after in
	 * the RX callback to avoid multiple interrupts being received
	 * by the CM3.
	 */
	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "MPU<->CM3 sync failure\n");
		m3_ipc->state = M3_STATE_UNKNOWN;
		return -EIO;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

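/*
 * Like wkup_m3_ping(), but does not wait for the CM3 to acknowledge the
 * message; used for the IDLE command, where the caller does not block on
 * the sync completion.
 */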
static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	mbox_msg_t dummy_msg = 0;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
{
	return ((m3_ipc->state != M3_STATE_RESET) &&
		(m3_ipc->state != M3_STATE_UNKNOWN));
}

/* Public functions */
/**
 * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @mem_type: memory type value read directly from emif
 *
 * wkup_m3 must know what memory type is in use to properly suspend
 * and resume.
 */
static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
{
	m3_ipc->mem_type = mem_type;
}

/**
 * wkup_m3_set_resume_address - Pass wkup_m3 resume address
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @addr: Physical address from which resume code should execute
 */
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
{
	m3_ipc->resume_addr = (unsigned long)addr;
}

/**
 * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns code representing the status of a low power mode transition.
 *	0 - Successful transition
 *	1 - Failure to transition to low power state
 */
static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
{
	unsigned int i;
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);

	i = M3_STATUS_RESP_MASK & val;
	i >>= __ffs(M3_STATUS_RESP_MASK);

	return i;
}

/**
 * wkup_m3_prepare_low_power - Request preparation for transition to
 *			       low power state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @state: A kernel suspend state to enter, either MEM or STANDBY
 *
 * Returns 0 if preparation was successful, otherwise returns error code
 */
static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
{
	struct device *dev = m3_ipc->dev;
	int m3_power_state;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	switch (state) {
	case WKUP_M3_DEEPSLEEP:
		m3_power_state = IPC_CMD_DS0;
		break;
	case WKUP_M3_STANDBY:
		m3_power_state = IPC_CMD_STANDBY;
		break;
	case WKUP_M3_IDLE:
		m3_power_state = IPC_CMD_IDLE;
		break;
	default:
		return 1;
	}

	/* Program each required IPC register then write defaults to others */
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);

	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);

	m3_ipc->state = M3_STATE_MSG_FOR_LP;

	if (state == WKUP_M3_IDLE)
		ret = wkup_m3_ping_noirq(m3_ipc);
	else
		ret = wkup_m3_ping(m3_ipc);

	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

/**
 * wkup_m3_finish_low_power - Return m3 to reset state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns 0 if reset was successful, otherwise returns error code
 */
static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);

	m3_ipc->state = M3_STATE_MSG_FOR_RESET;

	ret = wkup_m3_ping(m3_ipc);
	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

static struct wkup_m3_ipc_ops ipc_ops = {
	.set_mem_type = wkup_m3_set_mem_type,
	.set_resume_address = wkup_m3_set_resume_address,
	.prepare_low_power = wkup_m3_prepare_low_power,
	.finish_low_power = wkup_m3_finish_low_power,
	.request_pm_status = wkup_m3_request_pm_status,
};

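/*
 * Illustrative sketch (not part of this driver) of how a PM consumer is
 * expected to drive these ops around a suspend cycle; names other than the
 * ops and the WKUP_M3_* constants are hypothetical:
 *
 *	struct wkup_m3_ipc *m3_ipc = wkup_m3_ipc_get();
 *
 *	if (!m3_ipc)
 *		return -EPROBE_DEFER;
 *
 *	m3_ipc->ops->set_mem_type(m3_ipc, mem_type);
 *	m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
 *	ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
 *	... enter and leave the low-power state ...
 *	status = m3_ipc->ops->request_pm_status(m3_ipc);
 *	ret = m3_ipc->ops->finish_low_power(m3_ipc);
 *	wkup_m3_ipc_put(m3_ipc);
 */
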
/**
 * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
 *
 * Returns NULL if the wkup_m3 is not yet available, otherwise returns
 * pointer to wkup_m3_ipc struct.
 */
struct wkup_m3_ipc *wkup_m3_ipc_get(void)
{
	if (m3_ipc_state)
		get_device(m3_ipc_state->dev);
	else
		return NULL;

	return m3_ipc_state;
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);

/**
 * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
 * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
 */
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		put_device(m3_ipc_state->dev);
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);

static int wkup_m3_rproc_boot_thread(void *data)
{
	struct wkup_m3_ipc *m3_ipc = data;
	struct device *dev = m3_ipc->dev;
	int ret;

	init_completion(&m3_ipc->sync_complete);

	ret = rproc_boot(m3_ipc->rproc);
	if (ret)
		dev_err(dev, "rproc_boot failed\n");

	return 0;
}

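/*
 * Probe maps the CTRL module IPC region, hooks the CM3 TXEV interrupt,
 * requests the A8->M3 mailbox channel and then starts a kernel thread to
 * boot the wkup_m3 remoteproc once its firmware has been loaded, so kernel
 * boot is not held up waiting for the firmware.
 */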
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, ret;
	phandle rproc_phandle;
	struct rproc *m3_rproc;
	struct resource *res;
	struct task_struct *task;
	struct wkup_m3_ipc *m3_ipc;

	m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
	if (!m3_ipc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(m3_ipc->ipc_mem_base)) {
		dev_err(dev, "could not ioremap ipc_mem\n");
		return PTR_ERR(m3_ipc->ipc_mem_base);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
			       0, "wkup_m3_txev", m3_ipc);
	if (ret) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

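	/*
	 * The mailbox is used purely as a doorbell towards the CM3: sends do
	 * not block and TX completion is reported manually through
	 * mbox_client_txdone() in the ping helpers above.
	 */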
	m3_ipc->mbox_client.dev = dev;
	m3_ipc->mbox_client.tx_done = NULL;
	m3_ipc->mbox_client.tx_prepare = NULL;
	m3_ipc->mbox_client.rx_callback = NULL;
	m3_ipc->mbox_client.tx_block = false;
	m3_ipc->mbox_client.knows_txdone = false;

	m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);

	if (IS_ERR(m3_ipc->mbox)) {
		dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
			PTR_ERR(m3_ipc->mbox));
		return PTR_ERR(m3_ipc->mbox);
	}

	if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
		dev_err(&pdev->dev, "could not get rproc phandle\n");
		ret = -ENODEV;
		goto err_free_mbox;
	}

	m3_rproc = rproc_get_by_phandle(rproc_phandle);
	if (!m3_rproc) {
		dev_err(&pdev->dev, "could not get rproc handle\n");
		ret = -EPROBE_DEFER;
		goto err_free_mbox;
	}

	m3_ipc->rproc = m3_rproc;
	m3_ipc->dev = dev;
	m3_ipc->state = M3_STATE_RESET;

	m3_ipc->ops = &ipc_ops;

	/*
	 * Wait for firmware loading completion in a thread so we
	 * can boot the wkup_m3 as soon as it's ready without holding
	 * up kernel boot
	 */
	task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
			   "wkup_m3_rproc_loader");

	if (IS_ERR(task)) {
		dev_err(dev, "can't create rproc_boot thread\n");
		ret = PTR_ERR(task);
		goto err_put_rproc;
	}

	m3_ipc_state = m3_ipc;

	return 0;

err_put_rproc:
	rproc_put(m3_rproc);
err_free_mbox:
	mbox_free_channel(m3_ipc->mbox);
	return ret;
}

static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
	rproc_put(m3_ipc_state->rproc);

	m3_ipc_state = NULL;

	return 0;
}

static const struct of_device_id wkup_m3_ipc_of_match[] = {
	{ .compatible = "ti,am3352-wkup-m3-ipc", },
	{ .compatible = "ti,am4372-wkup-m3-ipc", },
	{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);

static struct platform_driver wkup_m3_ipc_driver = {
	.probe = wkup_m3_ipc_probe,
	.remove = wkup_m3_ipc_remove,
	.driver = {
		.name = "wkup_m3_ipc",
		.of_match_table = wkup_m3_ipc_of_match,
	},
};

module_platform_driver(wkup_m3_ipc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");