// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMx3 Wkup M3 IPC driver
 *
 * Copyright (C) 2015 Texas Instruments, Inc.
 *
 * Dave Gerlach <d-gerlach@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
#include <linux/wkup_m3_ipc.h>

#define AM33XX_CTRL_IPC_REG_COUNT	0x8
#define AM33XX_CTRL_IPC_REG_OFFSET(m)	(0x4 + 4 * (m))

/* AM33XX M3_TXEV_EOI register */
#define AM33XX_CONTROL_M3_TXEV_EOI	0x00

#define AM33XX_M3_TXEV_ACK		(0x1 << 0)
#define AM33XX_M3_TXEV_ENABLE		(0x0 << 0)

#define IPC_CMD_DS0			0x4
#define IPC_CMD_STANDBY			0xc
#define IPC_CMD_IDLE			0x10
#define IPC_CMD_RESET			0xe
#define DS_IPC_DEFAULT			0xffffffff
#define M3_VERSION_UNKNOWN		0x0000ffff
#define M3_BASELINE_VERSION		0x191
#define M3_STATUS_RESP_MASK		(0xffff << 16)
#define M3_FW_VERSION_MASK		0xffff
#define M3_WAKE_SRC_MASK		0xff

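/*
 * Bit fields below are packed into IPC register 4 by
 * wkup_m3_prepare_low_power() (memory type, VTT toggle GPIO,
 * IO isolation and the debug halt option).
 */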
#define IPC_MEM_TYPE_SHIFT		(0x0)
#define IPC_MEM_TYPE_MASK		(0x7 << 0)
#define IPC_VTT_STAT_SHIFT		(0x3)
#define IPC_VTT_STAT_MASK		(0x1 << 3)
#define IPC_VTT_GPIO_PIN_SHIFT		(0x4)
#define IPC_VTT_GPIO_PIN_MASK		(0x3f << 4)
#define IPC_IO_ISOLATION_STAT_SHIFT	(10)
#define IPC_IO_ISOLATION_STAT_MASK	(0x1 << 10)

#define IPC_DBG_HALT_SHIFT		(11)
#define IPC_DBG_HALT_MASK		(0x1 << 11)

#define M3_STATE_UNKNOWN		0
#define M3_STATE_RESET			1
#define M3_STATE_INITED			2
#define M3_STATE_MSG_FOR_LP		3
#define M3_STATE_MSG_FOR_RESET		4

#define WKUP_M3_SD_FW_MAGIC		0x570C

#define WKUP_M3_DMEM_START		0x80000
#define WKUP_M3_AUXDATA_OFFSET		0x1000
#define WKUP_M3_AUXDATA_SIZE		0xFF

static struct wkup_m3_ipc *m3_ipc_state;

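/*
 * Table mapping the wakeup interrupt number reported by the CM3 in IPC
 * register 6 to a human readable source name; the trailing "Unknown"
 * entry is returned when no entry matches.
 */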
static const struct wkup_m3_wakeup_src wakeups[] = {
	{.irq_nr = 16,	.src = "PRCM"},
	{.irq_nr = 35,	.src = "USB0_PHY"},
	{.irq_nr = 36,	.src = "USB1_PHY"},
	{.irq_nr = 40,	.src = "I2C0"},
	{.irq_nr = 41,	.src = "RTC Timer"},
	{.irq_nr = 42,	.src = "RTC Alarm"},
	{.irq_nr = 43,	.src = "Timer0"},
	{.irq_nr = 44,	.src = "Timer1"},
	{.irq_nr = 45,	.src = "UART"},
	{.irq_nr = 46,	.src = "GPIO0"},
	{.irq_nr = 48,	.src = "MPU_WAKE"},
	{.irq_nr = 49,	.src = "WDT0"},
	{.irq_nr = 50,	.src = "WDT1"},
	{.irq_nr = 51,	.src = "ADC_TSC"},
	{.irq_nr = 0,	.src = "Unknown"},
};

/**
 * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @data: pointer to data
 * @sz: size of data to copy (limited to WKUP_M3_AUXDATA_SIZE bytes)
 *
 * Copies any additional blob of data to the wkup_m3 dmem to be used by the
 * firmware. Returns the offset of the data within the M3 data memory.
 */
static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
					   const void *data, int sz)
{
	unsigned long aux_data_dev_addr;
	void *aux_data_addr;

	aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
	aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
				       aux_data_dev_addr,
				       WKUP_M3_AUXDATA_SIZE,
				       NULL);
	memcpy(aux_data_addr, data, sz);

	return WKUP_M3_AUXDATA_OFFSET;
}

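/*
 * Completion callback for the request_firmware_nowait() call issued from
 * wkup_m3_init_scale_data(): validate the voltage scale data blob by its
 * magic value, copy the payload into the M3 aux data area and record the
 * packed sleep/wake sequence offsets for later use in IPC register 5.
 */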
static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
{
	unsigned long val, aux_base;
	struct wkup_m3_scale_data_header hdr;
	struct wkup_m3_ipc *m3_ipc = context;
	struct device *dev = m3_ipc->dev;

	if (!fw) {
		dev_err(dev, "Voltage scale fw name given but file missing.\n");
		return;
	}

	memcpy(&hdr, fw->data, sizeof(hdr));

	if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
		dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
		goto release_sd_fw;
	}

	aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
					 fw->size - sizeof(hdr));

	val = (aux_base + hdr.sleep_offset);
	val |= ((aux_base + hdr.wake_offset) << 16);

	m3_ipc->volt_scale_offsets = val;

release_sd_fw:
	release_firmware(fw);
}

static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
				   struct device *dev)
{
	int ret = 0;

	/*
	 * If no name is provided, the user has already been warned; PM will
	 * still work, so return 0.
	 */
	if (!m3_ipc->sd_fw_name)
		return ret;

	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				      m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
				      m3_ipc, wkup_m3_scale_data_fw_cb);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
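/*
 * The "enable_late_halt" debugfs file toggles the IPC_DBG_HALT bit that
 * wkup_m3_prepare_low_power() passes to the CM3 in IPC register 4.
 */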
static void wkup_m3_set_halt_late(bool enabled)
{
	if (enabled)
		m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
	else
		m3_ipc_state->halt = 0;
}

static int option_get(void *data, u64 *val)
{
	u32 *option = data;

	*val = *option;

	return 0;
}

static int option_set(void *data, u64 val)
{
	u32 *option = data;

	*option = val;

	if (option == &m3_ipc_state->halt) {
		if (val)
			wkup_m3_set_halt_late(true);
		else
			wkup_m3_set_halt_late(false);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
			"%llu\n");

static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
{
	m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);

	if (IS_ERR(m3_ipc->dbg_path))
		return -EINVAL;

	(void)debugfs_create_file("enable_late_halt", 0644,
				  m3_ipc->dbg_path,
				  &m3_ipc->halt,
				  &wkup_m3_ipc_option_fops);

	return 0;
}

static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
{
	debugfs_remove_recursive(m3_ipc->dbg_path);
}
#else
static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
{
	return 0;
}

static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
{
}
#endif /* CONFIG_DEBUG_FS */

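/*
 * The CM3 raises the TXEV event to signal the MPU; the handler acknowledges
 * it via the EOI register and re-arms the event once the message has been
 * processed.
 */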
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ACK,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ENABLE,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

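/*
 * Accessors for the eight control module IPC registers used to exchange
 * data with the CM3 firmware. In this driver register 0 carries the resume
 * address, register 1 the power state command (and the status response
 * read back), register 2 the firmware version, register 4 the
 * memory/VTT/isolation/halt configuration, register 5 the voltage scale
 * data offsets and register 6 the wakeup source index.
 */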
static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
				   u32 val, int ipc_reg_num)
{
	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return;

	writel(val, m3_ipc->ipc_mem_base +
	       AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
					  int ipc_reg_num)
{
	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
		 "ipc register operation out of range"))
		return 0;

	return readl(m3_ipc->ipc_mem_base +
		     AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
{
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);

	return val & M3_FW_VERSION_MASK;
}

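/*
 * TXEV interrupt handler: acknowledge the event, then advance the IPC state
 * machine. After the initial boot message the firmware version is checked
 * against M3_BASELINE_VERSION; for low power and reset messages the waiter
 * blocked in wkup_m3_ping() is woken via sync_complete.
 */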
static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
{
	struct wkup_m3_ipc *m3_ipc = ipc_data;
	struct device *dev = m3_ipc->dev;
	int ver = 0;

	am33xx_txev_eoi(m3_ipc);

	switch (m3_ipc->state) {
	case M3_STATE_RESET:
		ver = wkup_m3_fw_version_read(m3_ipc);

		if (ver == M3_VERSION_UNKNOWN ||
		    ver < M3_BASELINE_VERSION) {
			dev_warn(dev, "CM3 Firmware Version %x not supported\n",
				 ver);
		} else {
			dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
		}

		m3_ipc->state = M3_STATE_INITED;
		wkup_m3_init_scale_data(m3_ipc, dev);
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_RESET:
		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_LP:
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_UNKNOWN:
		dev_warn(dev, "Unknown CM3 State\n");
	}

	am33xx_txev_enable(m3_ipc);

	return IRQ_HANDLED;
}

static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	/*
	 * Write a dummy message to the mailbox in order to trigger the RX
	 * interrupt to alert the M3 that data is available in the IPC
	 * registers. We must enable the IRQ here and disable it after in
	 * the RX callback to avoid multiple interrupts being received
	 * by the CM3.
	 */
	ret = mbox_send_message(m3_ipc->mbox, NULL);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "MPU<->CM3 sync failure\n");
		m3_ipc->state = M3_STATE_UNKNOWN;
		return -EIO;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

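/*
 * Variant of wkup_m3_ping() that does not wait for the CM3 to signal
 * completion; used by wkup_m3_prepare_low_power() for the WKUP_M3_IDLE
 * path.
 */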
static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret;

	if (!m3_ipc->mbox) {
		dev_err(dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	ret = mbox_send_message(m3_ipc->mbox, NULL);
	if (ret < 0) {
		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
			__func__, ret);
		return ret;
	}

	mbox_client_txdone(m3_ipc->mbox, 0);
	return 0;
}

static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
{
	return ((m3_ipc->state != M3_STATE_RESET) &&
		(m3_ipc->state != M3_STATE_UNKNOWN));
}

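/*
 * Precompute the VTT toggle GPIO and IO isolation bits that are later
 * OR'd into IPC register 4 by wkup_m3_prepare_low_power().
 */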
static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
{
	m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
			    (gpio << IPC_VTT_GPIO_PIN_SHIFT);
}

static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
{
	m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
}

/* Public functions */
/**
 * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @mem_type: memory type value read directly from emif
 *
 * wkup_m3 must know what memory type is in use to properly suspend
 * and resume.
 */
static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
{
	m3_ipc->mem_type = mem_type;
}

/**
 * wkup_m3_set_resume_address - Pass wkup_m3 resume address
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @addr: Physical address from which resume code should execute
 */
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
{
	m3_ipc->resume_addr = (unsigned long)addr;
}

/**
 * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns code representing the status of a low power mode transition.
 *	0 - Successful transition
 *	1 - Failure to transition to low power state
 */
static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
{
	unsigned int i;
	int val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);

	i = M3_STATUS_RESP_MASK & val;
	i >>= __ffs(M3_STATUS_RESP_MASK);

	return i;
}

/**
 * wkup_m3_prepare_low_power - Request preparation for transition to
 *			       low power state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @state: Low power state to enter: WKUP_M3_DEEPSLEEP, WKUP_M3_STANDBY
 *	   or WKUP_M3_IDLE
 *
 * Returns 0 if preparation was successful, otherwise returns error code
 */
static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
{
	struct device *dev = m3_ipc->dev;
	int m3_power_state;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	switch (state) {
	case WKUP_M3_DEEPSLEEP:
		m3_power_state = IPC_CMD_DS0;
		wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
		break;
	case WKUP_M3_STANDBY:
		m3_power_state = IPC_CMD_STANDBY;
		wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
		break;
	case WKUP_M3_IDLE:
		m3_power_state = IPC_CMD_IDLE;
		wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
		break;
	default:
		return 1;
	}

	/* Program each required IPC register then write defaults to others */
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
			       m3_ipc->vtt_conf |
			       m3_ipc->isolation_conf |
			       m3_ipc->halt, 4);

	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);

	m3_ipc->state = M3_STATE_MSG_FOR_LP;

	if (state == WKUP_M3_IDLE)
		ret = wkup_m3_ping_noirq(m3_ipc);
	else
		ret = wkup_m3_ping(m3_ipc);

	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

/**
 * wkup_m3_finish_low_power - Return m3 to reset state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns 0 if reset was successful, otherwise returns error code
 */
static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
{
	struct device *dev = m3_ipc->dev;
	int ret = 0;

	if (!wkup_m3_is_available(m3_ipc))
		return -ENODEV;

	wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);

	m3_ipc->state = M3_STATE_MSG_FOR_RESET;

	ret = wkup_m3_ping(m3_ipc);
	if (ret) {
		dev_err(dev, "Unable to ping CM3\n");
		return ret;
	}

	return 0;
}

/**
 * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
 * @m3_ipc: Pointer to wkup_m3_ipc context
 */
static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
{
	unsigned int wakeup_src_idx;
	int j, val;

	val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);

	wakeup_src_idx = val & M3_WAKE_SRC_MASK;

	for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
		if (wakeups[j].irq_nr == wakeup_src_idx)
			return wakeups[j].src;
	}
	return wakeups[j].src;
}

/**
 * wkup_m3_set_rtc_only - Set the rtc_only flag
 * @m3_ipc: Pointer to wkup_m3_ipc context
 */
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		m3_ipc_state->is_rtc_only = true;
}

static struct wkup_m3_ipc_ops ipc_ops = {
	.set_mem_type = wkup_m3_set_mem_type,
	.set_resume_address = wkup_m3_set_resume_address,
	.prepare_low_power = wkup_m3_prepare_low_power,
	.finish_low_power = wkup_m3_finish_low_power,
	.request_pm_status = wkup_m3_request_pm_status,
	.request_wake_src = wkup_m3_request_wake_src,
	.set_rtc_only = wkup_m3_set_rtc_only,
};

/**
 * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
 *
 * Returns NULL if the wkup_m3 is not yet available, otherwise returns
 * pointer to wkup_m3_ipc struct.
 */
struct wkup_m3_ipc *wkup_m3_ipc_get(void)
{
	if (m3_ipc_state)
		get_device(m3_ipc_state->dev);
	else
		return NULL;

	return m3_ipc_state;
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);

/**
 * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
 * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
 */
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
{
	if (m3_ipc_state)
		put_device(m3_ipc_state->dev);
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);

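/*
 * Usage sketch for the exported handle and its ops (illustrative only, not
 * lifted from an in-tree user; mem_type, resume_addr and the error handling
 * are placeholders a platform PM driver would supply):
 *
 *	struct wkup_m3_ipc *m3_ipc = wkup_m3_ipc_get();
 *
 *	if (!m3_ipc)
 *		return -EPROBE_DEFER;
 *	m3_ipc->ops->set_mem_type(m3_ipc, mem_type);
 *	m3_ipc->ops->set_resume_address(m3_ipc, resume_addr);
 *	ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
 *	... enter the low power state, then after wakeup ...
 *	status = m3_ipc->ops->request_pm_status(m3_ipc);
 *	m3_ipc->ops->finish_low_power(m3_ipc);
 *	wkup_m3_ipc_put(m3_ipc);
 */

/*
 * Boot the wkup_m3 remote processor from a kthread so that waiting for its
 * firmware does not stall the rest of kernel boot; m3_ipc_state is only
 * published once rproc_boot() has succeeded.
 */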
static int wkup_m3_rproc_boot_thread(void *arg)
{
	struct wkup_m3_ipc *m3_ipc = arg;
	struct device *dev = m3_ipc->dev;
	int ret;

	init_completion(&m3_ipc->sync_complete);

	ret = rproc_boot(m3_ipc->rproc);
	if (ret)
		dev_err(dev, "rproc_boot failed\n");
	else
		m3_ipc_state = m3_ipc;

	return 0;
}

static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, ret, temp;
	phandle rproc_phandle;
	struct rproc *m3_rproc;
	struct task_struct *task;
	struct wkup_m3_ipc *m3_ipc;
	struct device_node *np = dev->of_node;

	m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
	if (!m3_ipc)
		return -ENOMEM;

	m3_ipc->ipc_mem_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(m3_ipc->ipc_mem_base))
		return PTR_ERR(m3_ipc->ipc_mem_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
			       0, "wkup_m3_txev", m3_ipc);
	if (ret) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	m3_ipc->mbox_client.dev = dev;
	m3_ipc->mbox_client.tx_done = NULL;
	m3_ipc->mbox_client.tx_prepare = NULL;
	m3_ipc->mbox_client.rx_callback = NULL;
	m3_ipc->mbox_client.tx_block = false;
	m3_ipc->mbox_client.knows_txdone = false;

	m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);

	if (IS_ERR(m3_ipc->mbox)) {
		dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
			PTR_ERR(m3_ipc->mbox));
		return PTR_ERR(m3_ipc->mbox);
	}

	if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
		dev_err(&pdev->dev, "could not get rproc phandle\n");
		ret = -ENODEV;
		goto err_free_mbox;
	}

	m3_rproc = rproc_get_by_phandle(rproc_phandle);
	if (!m3_rproc) {
		dev_err(&pdev->dev, "could not get rproc handle\n");
		ret = -EPROBE_DEFER;
		goto err_free_mbox;
	}

	m3_ipc->rproc = m3_rproc;
	m3_ipc->dev = dev;
	m3_ipc->state = M3_STATE_RESET;

	m3_ipc->ops = &ipc_ops;

	if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
		if (temp >= 0 && temp <= 31)
			wkup_m3_set_vtt_gpio(m3_ipc, temp);
		else
			dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
	}

	if (of_property_read_bool(np, "ti,set-io-isolation"))
		wkup_m3_set_io_isolation(m3_ipc);

	ret = of_property_read_string(np, "firmware-name",
				      &m3_ipc->sd_fw_name);
	if (ret)
		dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");

	/*
	 * Wait for firmware loading completion in a thread so we
	 * can boot the wkup_m3 as soon as it's ready without holding
	 * up kernel boot
	 */
	task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
			   "wkup_m3_rproc_loader");

	if (IS_ERR(task)) {
		dev_err(dev, "can't create rproc_boot thread\n");
		ret = PTR_ERR(task);
		goto err_put_rproc;
	}

	wkup_m3_ipc_dbg_init(m3_ipc);

	return 0;

err_put_rproc:
	rproc_put(m3_rproc);
err_free_mbox:
	mbox_free_channel(m3_ipc->mbox);
	return ret;
}

static void wkup_m3_ipc_remove(struct platform_device *pdev)
{
	wkup_m3_ipc_dbg_destroy(m3_ipc_state);

	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
	rproc_put(m3_ipc_state->rproc);

	m3_ipc_state = NULL;
}

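/*
 * System suspend/resume hooks: nothing is needed on suspend, but after an
 * RTC-only suspend the wkup_m3 context is assumed to be lost, so the remote
 * processor is shut down and booted again on resume to reload its firmware.
 */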
static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
{
	/*
	 * Nothing needs to be done on suspend even with the rtc_only flag set
	 */
	return 0;
}

static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
{
	if (m3_ipc_state->is_rtc_only) {
		rproc_shutdown(m3_ipc_state->rproc);
		rproc_boot(m3_ipc_state->rproc);
	}

	m3_ipc_state->is_rtc_only = false;

	return 0;
}

static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
};

static const struct of_device_id wkup_m3_ipc_of_match[] = {
	{ .compatible = "ti,am3352-wkup-m3-ipc", },
	{ .compatible = "ti,am4372-wkup-m3-ipc", },
	{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);

static struct platform_driver wkup_m3_ipc_driver = {
	.probe = wkup_m3_ipc_probe,
	.remove_new = wkup_m3_ipc_remove,
	.driver = {
		.name = "wkup_m3_ipc",
		.of_match_table = wkup_m3_ipc_of_match,
		.pm = &wkup_m3_ipc_pm_ops,
	},
};

module_platform_driver(wkup_m3_ipc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");