xref: /linux/drivers/remoteproc/mtk_scp.c (revision e637b37a520513a04d00f4add07ec25f357e6c6d)
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2019 MediaTek Inc.

#include <asm/barrier.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <linux/rpmsg/mtk_rpmsg.h>
#include <linux/string.h>

#include "mtk_common.h"
#include "remoteproc_internal.h"

#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"

/**
 * scp_get() - get a reference to SCP.
 *
 * @pdev:	the platform device of the module requesting the SCP platform
 *		device in order to use the SCP API.
 *
 * Return: NULL on failure, otherwise a reference to the SCP.
 **/
struct mtk_scp *scp_get(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *scp_node;
	struct platform_device *scp_pdev;

	scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
	if (!scp_node) {
		dev_err(dev, "can't get SCP node\n");
		return NULL;
	}

	scp_pdev = of_find_device_by_node(scp_node);
	of_node_put(scp_node);

	if (WARN_ON(!scp_pdev)) {
		dev_err(dev, "SCP pdev failed\n");
		return NULL;
	}

	return platform_get_drvdata(scp_pdev);
}
EXPORT_SYMBOL_GPL(scp_get);

/**
 * scp_put() - "free" the SCP
 *
 * @scp:	mtk_scp structure from scp_get().
 **/
void scp_put(struct mtk_scp *scp)
{
	put_device(scp->dev);
}
EXPORT_SYMBOL_GPL(scp_put);
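
/*
 * Illustrative sketch (comment only, not part of this driver): how a
 * hypothetical client driver might pair scp_get() and scp_put(). It
 * assumes the client's DT node carries a "mediatek,scp" phandle, and
 * deferring the probe is just one possible policy on failure.
 *
 *	struct mtk_scp *scp = scp_get(client_pdev);
 *	if (!scp)
 *		return -EPROBE_DEFER;	// SCP may not have probed yet
 *	// ... use the SCP IPI and mapping APIs ...
 *	scp_put(scp);			// drop the reference from scp_get()
 */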

static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *scp_node;

	dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);

	/* report watchdog timeout to all cores */
	list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
		rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
}

static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_scp *scp = priv;
	struct scp_run *run = data;

	scp->run.signaled = run->signaled;
	strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
	scp->run.dec_capability = run->dec_capability;
	scp->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&scp->run.wq);
}

static void scp_ipi_handler(struct mtk_scp *scp)
{
	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
	scp_ipi_handler_t handler;
	u32 id = readl(&rcv_obj->id);
	u32 len = readl(&rcv_obj->len);
	const struct mtk_scp_sizes_data *scp_sizes;

	scp_sizes = scp->data->scp_sizes;
	if (len > scp_sizes->ipi_share_buffer_size) {
		dev_err(scp->dev, "ipi message too long (len %d, max %zd)", len,
			scp_sizes->ipi_share_buffer_size);
		return;
	}
	if (id >= SCP_IPI_MAX) {
		dev_err(scp->dev, "No such ipi id = %d\n", id);
		return;
	}

	scp_ipi_lock(scp, id);
	handler = ipi_desc[id].handler;
	if (!handler) {
		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
		scp_ipi_unlock(scp, id);
		return;
	}

	memcpy_fromio(scp->share_buf, &rcv_obj->share_buf, len);
	memset(&scp->share_buf[len], 0, scp_sizes->ipi_share_buffer_size - len);
	handler(scp->share_buf, len, ipi_desc[id].priv);
	scp_ipi_unlock(scp, id);

	scp->ipi_id_ack[id] = true;
	wake_up(&scp->ack_wq);
}
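
/*
 * Illustrative sketch (comment only): how a client hooks into the
 * dispatch above. A handler registered for an IPI id is invoked from
 * scp_ipi_handler() with a bounce copy of the shared buffer; the id,
 * context pointer and timeout below are hypothetical.
 *
 *	static void example_handler(void *data, unsigned int len, void *priv)
 *	{
 *		// 'data' points at scp->share_buf and is only valid
 *		// for the duration of this call
 *	}
 *
 *	ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264, example_handler, ctx);
 *	if (!ret)
 *		ret = scp_ipi_send(scp, SCP_IPI_VDEC_H264, &msg, sizeof(msg), 2000);
 */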

static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset);

static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
	int ret;
	size_t buf_sz, offset;
	size_t share_buf_offset;
	const struct mtk_scp_sizes_data *scp_sizes;

	/* read the ipi buf addr from FW itself first */
	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
	if (ret) {
		/* use default ipi buf addr if the FW doesn't have it */
		offset = scp->data->ipi_buf_offset;
		if (!offset)
			return ret;
	}
	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);

	/* Make sure IPI buffer fits in the L2TCM range assigned to this core */
	buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);

	if (scp->sram_size < buf_sz + offset) {
		dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
		return -EOVERFLOW;
	}

	scp_sizes = scp->data->scp_sizes;
	scp->recv_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset);
	share_buf_offset = sizeof(scp->recv_buf->id)
		+ sizeof(scp->recv_buf->len) + scp_sizes->ipi_share_buffer_size;
	scp->send_buf = (struct mtk_share_obj __iomem *)
			(scp->sram_base + offset + share_buf_offset);
	memset_io(scp->recv_buf, 0, share_buf_offset);
	memset_io(scp->send_buf, 0, share_buf_offset);

	return 0;
}

static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val &= ~MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
	u32 val;

	val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
	val |= MT8183_SW_RSTN_BIT;
	writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
}

static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
}

static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
}

static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
}

static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
}

static void mt8183_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
	if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);
	else
		scp_wdt_handler(scp, scp_to_host);

	/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
	writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
	       scp->cluster->reg_base + MT8183_SCP_TO_HOST);
}

static void mt8192_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);

		/*
		 * SCP won't send another interrupt until we clear
		 * MT8192_SCP2APMCU_IPC.
		 */
		writel(MT8192_SCP_IPC_INT_BIT,
		       scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
	} else {
		scp_wdt_handler(scp, scp_to_host);
		writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
	}
}

static void mt8195_scp_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
		scp_ipi_handler(scp);
	} else {
		u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);

		if (reason & MT8195_CORE0_WDT)
			writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);

		if (reason & MT8195_CORE1_WDT)
			writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);

		scp_wdt_handler(scp, reason);
	}

	writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
}

static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
{
	u32 scp_to_host;

	scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);

	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
		scp_ipi_handler(scp);

	writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
}

static irqreturn_t scp_irq_handler(int irq, void *priv)
{
	struct mtk_scp *scp = priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return IRQ_NONE;
	}

	scp->data->scp_irq_handler(scp);

	clk_disable_unprepare(scp->clk);

	return IRQ_HANDLED;
}

static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		u32 offset = phdr->p_offset;
		void __iomem *ptr;

		dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
			phdr->p_type, da, memsz, filesz);

		if (phdr->p_type != PT_LOAD)
			continue;
		if (!filesz)
			continue;

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		if (offset + filesz > fw->size) {
			dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
				offset + filesz, fw->size);
			ret = -EINVAL;
			break;
		}

		/* grab the kernel address for this device address */
		ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
		if (!ptr) {
			dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
			ret = -EINVAL;
			break;
		}

		/* put the segment where the remote processor expects it */
		scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
	}

	return ret;
}

static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
				     const struct firmware *fw,
				     size_t *offset)
{
	struct elf32_hdr *ehdr;
	struct elf32_shdr *shdr, *shdr_strtab;
	int i;
	const u8 *elf_data = fw->data;
	const char *strtab;

	ehdr = (struct elf32_hdr *)elf_data;
	shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
	shdr_strtab = shdr + ehdr->e_shstrndx;
	strtab = (const char *)(elf_data + shdr_strtab->sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (strcmp(strtab + shdr->sh_name,
			   SECTION_NAME_IPI_BUFFER) == 0) {
			*offset = shdr->sh_addr;
			return 0;
		}
	}

	return -ENOENT;
}

static int mt8183_scp_clk_get(struct mtk_scp *scp)
{
	struct device *dev = scp->dev;
	int ret = 0;

	scp->clk = devm_clk_get(dev, "main");
	if (IS_ERR(scp->clk)) {
		dev_err(dev, "Failed to get clock\n");
		ret = PTR_ERR(scp->clk);
	}

	return ret;
}

static int mt8192_scp_clk_get(struct mtk_scp *scp)
{
	return mt8183_scp_clk_get(scp);
}

static int mt8195_scp_clk_get(struct mtk_scp *scp)
{
	scp->clk = NULL;

	return 0;
}

static int mt8183_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);

	/* Turn on the power of SCP's SRAM before using it. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

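/*
 * The SRAM power registers written below hold one power-down bit per
 * bank. Banks are switched one at a time to limit in-rush current:
 * power-on walks the written mask down from GENMASK(31, 0) to
 * GENMASK(0, 0) and finally writes 0 (all banks powered), while
 * power-off walks it back up. Banks flagged in @reserved_mask keep
 * their bits cleared and are left powered.
 */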
static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
{
	int i;

	for (i = 31; i >= 0; i--)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
	writel(0, addr);
}

static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
{
	int i;

	writel(0, addr);
	for (i = 0; i < 32; i++)
		writel(GENMASK(i, 0) & ~reserved_mask, addr);
}

static int mt8186_scp_before_load(struct mtk_scp *scp)
{
	/* Clear SCP to host interrupt */
	writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);

	/* Reset clocks before loading FW */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);

	/* Turn on the power of SCP's SRAM before using it. Enable one block at a time. */
	scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);

	/* Initialize TCM before loading FW. */
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
	writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);

	/*
	 * Set I-cache and D-cache size before loading SCP FW.
	 * SCP SRAM logical address may change when cache size setting differs.
	 */
	writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
	       scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
	writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);

	return 0;
}

static int mt8188_scp_l2tcm_on(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

		/* Power on L2TCM */
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	}

	scp_cluster->l2tcm_refcnt += 1;

	mutex_unlock(&scp_cluster->cluster_lock);

	return 0;
}

static int mt8188_scp_before_load(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	mt8188_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8188_scp_c1_before_load(struct mtk_scp *scp)
{
	u32 sec_ctrl;
	struct mtk_scp *scp_c0;
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	scp->data->scp_reset_assert(scp);

	mt8188_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);

	/*
	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
	 * on SRAM when SCP core 1 accesses SRAM.
	 *
	 * This configuration allows SCP core 0 and core 1 to boot from
	 * different SRAM addresses, because core 0 and core 1 both boot from
	 * the head of SRAM by default. This must be configured before booting
	 * SCP core 1.
	 *
	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
	 * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
	 * The shift action is transparent to software.
	 */
	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);

	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);

	/* enable SRAM offset when fetching instruction and data */
	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);

	return 0;
}

static int mt8192_scp_before_load(struct mtk_scp *scp)
{
	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
	writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	/* enable SRAM clock */
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);

		/* Power on L2TCM */
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	scp_cluster->l2tcm_refcnt += 1;

	mutex_unlock(&scp_cluster->cluster_lock);

	return 0;
}

static int mt8195_scp_before_load(struct mtk_scp *scp)
{
	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);

	return 0;
}

static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
{
	u32 sec_ctrl;
	struct mtk_scp *scp_c0;
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	scp->data->scp_reset_assert(scp);

	mt8195_scp_l2tcm_on(scp);

	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* enable MPU for all memory regions */
	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);

	/*
	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
	 * on SRAM when SCP core 1 accesses SRAM.
	 *
	 * This configuration allows SCP core 0 and core 1 to boot from
	 * different SRAM addresses, because core 0 and core 1 both boot from
	 * the head of SRAM by default. This must be configured before booting
	 * SCP core 1.
	 *
	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
	 * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
	 * The shift action is transparent to software.
	 */
	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);

	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);

	/* enable SRAM offset when fetching instruction and data */
	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);

	return 0;
}

static int scp_load(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	/* Hold SCP in reset while loading FW. */
	scp->data->scp_reset_assert(scp);

	ret = scp->data->scp_before_load(scp);
	if (ret < 0)
		goto leave;

	ret = scp_elf_load_segments(rproc, fw);
leave:
	clk_disable_unprepare(scp->clk);

	return ret;
}

static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	ret = scp_ipi_init(scp, fw);
	clk_disable_unprepare(scp->clk);
	return ret;
}

static int scp_start(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	struct device *dev = scp->dev;
	struct scp_run *run = &scp->run;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		return ret;
	}

	run->signaled = false;

	scp->data->scp_reset_deassert(scp);

	ret = wait_event_interruptible_timeout(
					run->wq,
					run->signaled,
					msecs_to_jiffies(2000));

	if (ret == 0) {
		dev_err(dev, "timed out waiting for SCP initialization!\n");
		ret = -ETIME;
		goto stop;
	}
	if (ret == -ERESTARTSYS) {
		dev_err(dev, "wait for SCP initialization was interrupted by a signal!\n");
		goto stop;
	}

	clk_disable_unprepare(scp->clk);
	dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);

	return 0;

stop:
	scp->data->scp_reset_assert(scp);
	clk_disable_unprepare(scp->clk);
	return ret;
}

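/*
 * Device address translation for MT8183-class SCPs: device addresses
 * below sram_size fall into the core's SRAM, anything else is treated
 * as an offset into the DRAM region allocated in scp_map_memory_region()
 * (only when max_dram_size is non-zero).
 */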
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;
	const struct mtk_scp_sizes_data *scp_sizes;

	scp_sizes = scp->data->scp_sizes;
	if (da < scp->sram_size) {
		offset = da;
		if (offset >= 0 && (offset + len) <= scp->sram_size)
			return (void __force *)scp->sram_base + offset;
	} else if (scp_sizes->max_dram_size) {
		offset = da - scp->dma_addr;
		if (offset >= 0 && (offset + len) <= scp_sizes->max_dram_size)
			return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
	int offset;
	const struct mtk_scp_sizes_data *scp_sizes;

	scp_sizes = scp->data->scp_sizes;
	if (da >= scp->sram_phys &&
	    (da + len) <= scp->sram_phys + scp->sram_size) {
		offset = da - scp->sram_phys;
		return (void __force *)scp->sram_base + offset;
	}

	/* optional memory region */
	if (scp->cluster->l1tcm_size &&
	    da >= scp->cluster->l1tcm_phys &&
	    (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
		offset = da - scp->cluster->l1tcm_phys;
		return (void __force *)scp->cluster->l1tcm_base + offset;
	}

	/* optional memory region */
	if (scp_sizes->max_dram_size &&
	    da >= scp->dma_addr &&
	    (da + len) <= scp->dma_addr + scp_sizes->max_dram_size) {
		offset = da - scp->dma_addr;
		return scp->cpu_addr + offset;
	}

	return NULL;
}

static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct mtk_scp *scp = rproc->priv;

	return scp->data->scp_da_to_va(scp, da, len);
}

static void mt8183_scp_stop(struct mtk_scp *scp)
{
	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
}

static void mt8188_scp_l2tcm_off(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt > 0)
		scp_cluster->l2tcm_refcnt -= 1;

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* Power off L2TCM */
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	}

	mutex_unlock(&scp_cluster->cluster_lock);
}

static void mt8188_scp_stop(struct mtk_scp *scp)
{
	mt8188_scp_l2tcm_off(scp);

	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8188_scp_c1_stop(struct mtk_scp *scp)
{
	mt8188_scp_l2tcm_off(scp);

	/* Power off CPU SRAM */
	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
}

static void mt8192_scp_stop(struct mtk_scp *scp)
{
	/* Disable SRAM clock */
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
{
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;

	mutex_lock(&scp_cluster->cluster_lock);

	if (scp_cluster->l2tcm_refcnt > 0)
		scp_cluster->l2tcm_refcnt -= 1;

	if (scp_cluster->l2tcm_refcnt == 0) {
		/* Power off L2TCM */
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
				   MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
	}

	mutex_unlock(&scp_cluster->cluster_lock);
}

static void mt8195_scp_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
}

static void mt8195_scp_c1_stop(struct mtk_scp *scp)
{
	mt8195_scp_l2tcm_off(scp);

	/* Power off CPU SRAM */
	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);

	/* Disable SCP watchdog */
	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
}

static int scp_stop(struct rproc *rproc)
{
	struct mtk_scp *scp = rproc->priv;
	int ret;

	ret = clk_prepare_enable(scp->clk);
	if (ret) {
		dev_err(scp->dev, "failed to enable clocks\n");
		return ret;
	}

	scp->data->scp_reset_assert(scp);
	scp->data->scp_stop(scp);
	clk_disable_unprepare(scp->clk);

	return 0;
}

static const struct rproc_ops scp_ops = {
	.start		= scp_start,
	.stop		= scp_stop,
	.load		= scp_load,
	.da_to_va	= scp_da_to_va,
	.parse_fw	= scp_parse_fw,
	.sanity_check	= rproc_elf_sanity_check,
};

/**
 * scp_get_device() - get device struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct device *scp_get_device(struct mtk_scp *scp)
{
	return scp->dev;
}
EXPORT_SYMBOL_GPL(scp_get_device);

/**
 * scp_get_rproc() - get rproc struct of SCP
 *
 * @scp:	mtk_scp structure
 **/
struct rproc *scp_get_rproc(struct mtk_scp *scp)
{
	return scp->rproc;
}
EXPORT_SYMBOL_GPL(scp_get_rproc);

/**
 * scp_get_vdec_hw_capa() - get video decoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video decoder hardware capability
 **/
unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
{
	return scp->run.dec_capability;
}
EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);

/**
 * scp_get_venc_hw_capa() - get video encoder hardware capability
 *
 * @scp:	mtk_scp structure
 *
 * Return: video encoder hardware capability
 **/
unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
{
	return scp->run.enc_capability;
}
EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);

/**
 * scp_mapping_dm_addr() - Map SRAM/DRAM to a kernel virtual address
 *
 * @scp:	mtk_scp structure
 * @mem_addr:	memory address as seen by the SCP
 *
 * Map the SCP's SRAM address /
 * DMEM (Data Extended Memory) memory address /
 * working buffer memory address to
 * a kernel virtual address.
 *
 * Return: ERR_PTR(-EINVAL) if mapping failed,
 * otherwise the mapped kernel virtual address
 **/
void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
{
	void *ptr;

	ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
	if (!ptr)
		return ERR_PTR(-EINVAL);

	return ptr;
}
EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
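
/*
 * Illustrative sketch (comment only): resolving an SCP-side address to
 * a kernel virtual address; the device address used here is made up.
 *
 *	void *va = scp_mapping_dm_addr(scp, 0x500000);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */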

static int scp_map_memory_region(struct mtk_scp *scp)
{
	int ret;
	const struct mtk_scp_sizes_data *scp_sizes;

	ret = of_reserved_mem_device_init(scp->dev);

	/* reserved memory is optional. */
	if (ret == -ENODEV) {
		dev_info(scp->dev, "skipping reserved memory initialization.");
		return 0;
	}

	if (ret) {
		dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
		return -ENOMEM;
	}

	/* Reserved SCP code size */
	scp_sizes = scp->data->scp_sizes;
	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp_sizes->max_dram_size,
					   &scp->dma_addr, GFP_KERNEL);
	if (!scp->cpu_addr)
		return -ENOMEM;

	return 0;
}

static void scp_unmap_memory_region(struct mtk_scp *scp)
{
	const struct mtk_scp_sizes_data *scp_sizes;

	scp_sizes = scp->data->scp_sizes;
	if (scp_sizes->max_dram_size == 0)
		return;

	dma_free_coherent(scp->dev, scp_sizes->max_dram_size, scp->cpu_addr,
			  scp->dma_addr);
	of_reserved_mem_device_release(scp->dev);
}

static int scp_register_ipi(struct platform_device *pdev, u32 id,
			    ipi_handler_t handler, void *priv)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_register(scp, id, handler, priv);
}

static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	scp_ipi_unregister(scp, id);
}

static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
			unsigned int len, unsigned int wait)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);

	return scp_ipi_send(scp, id, buf, len, wait);
}

static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
	.send_ipi = scp_send_ipi,
	.register_ipi = scp_register_ipi,
	.unregister_ipi = scp_unregister_ipi,
	.ns_ipi_id = SCP_IPI_NS_SERVICE,
};

static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
{
	scp->rpmsg_subdev =
		mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
					      &mtk_scp_rpmsg_info);
	if (scp->rpmsg_subdev)
		rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
}

static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
{
	if (scp->rpmsg_subdev) {
		rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
		mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
		scp->rpmsg_subdev = NULL;
	}
}

/**
 * scp_get_default_fw_path() - Get the default SCP firmware path
 * @dev:     SCP Device
 * @core_id: SCP Core number
 *
 * This function generates a path in the following format:
 *     mediatek/(soc_model)/scp_cX.img for multi-core SCP HW, or
 *     mediatek/(soc_model)/scp.img for single-core SCP HW.
 *
 * Return: A devm allocated string containing the full path to
 *         an SCP firmware, or an error pointer
 */
static const char *scp_get_default_fw_path(struct device *dev, int core_id)
{
	struct device_node *np = core_id < 0 ? dev->of_node : dev->parent->of_node;
	const char *compatible, *soc;
	char scp_fw_file[7];
	int ret;

	/* Use only the first compatible string */
	ret = of_property_read_string_index(np, "compatible", 0, &compatible);
	if (ret)
		return ERR_PTR(ret);

	/* If the compatible string's length is implausible, bail out early */
	if (strlen(compatible) < strlen("mediatek,mtXXXX-scp"))
		return ERR_PTR(-EINVAL);

	/* If the compatible string starts with "mediatek,mt" assume that it's ok */
	if (!str_has_prefix(compatible, "mediatek,mt"))
		return ERR_PTR(-EINVAL);

	if (core_id >= 0)
		ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp_c%d", core_id);
	else
		ret = snprintf(scp_fw_file, sizeof(scp_fw_file), "scp");
	if (ret >= sizeof(scp_fw_file))
		return ERR_PTR(-ENAMETOOLONG);

	/* Not using strchr here, as strlen of a const gets optimized by compiler */
	soc = &compatible[strlen("mediatek,")];

	return devm_kasprintf(dev, GFP_KERNEL, "mediatek/%.*s/%s.img",
			      (int)strlen("mtXXXX"), soc, scp_fw_file);
}
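
/*
 * For example, a first compatible of "mediatek,mt8195-scp-dual" yields
 * "mediatek/mt8195/scp_c1.img" for core 1, while a single-core
 * "mediatek,mt8183-scp" (core_id < 0) yields "mediatek/mt8183/scp.img".
 */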

static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
				      struct mtk_scp_of_cluster *scp_cluster,
				      const struct mtk_scp_of_data *of_data,
				      int core_id)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_scp *scp;
	struct rproc *rproc;
	struct resource *res;
	const char *fw_name;
	int ret, i;
	const struct mtk_scp_sizes_data *scp_sizes;

	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
	if (ret) {
		fw_name = scp_get_default_fw_path(dev, core_id);
		if (IS_ERR(fw_name)) {
			dev_err(dev, "Cannot get firmware path: %ld\n", PTR_ERR(fw_name));
			return ERR_CAST(fw_name);
		}
	}

	rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
	if (!rproc) {
		dev_err(dev, "unable to allocate remoteproc\n");
		return ERR_PTR(-ENOMEM);
	}

	scp = rproc->priv;
	scp->rproc = rproc;
	scp->dev = dev;
	scp->data = of_data;
	scp->cluster = scp_cluster;
	platform_set_drvdata(pdev, scp);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	scp->sram_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scp->sram_base)) {
		dev_err(dev, "Failed to parse and map sram memory\n");
		return ERR_CAST(scp->sram_base);
	}

	scp->sram_size = resource_size(res);
	scp->sram_phys = res->start;

	ret = scp->data->scp_clk_get(scp);
	if (ret)
		return ERR_PTR(ret);

	ret = scp_map_memory_region(scp);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&scp->send_lock);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_init(&scp->ipi_desc[i].lock);

	/* register SCP initialization IPI */
	ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
	if (ret) {
		dev_err(dev, "Failed to register IPI_SCP_INIT\n");
		goto release_dev_mem;
	}

	scp_sizes = scp->data->scp_sizes;
	scp->share_buf = kzalloc(scp_sizes->ipi_share_buffer_size, GFP_KERNEL);
	if (!scp->share_buf) {
		dev_err(dev, "Failed to allocate IPI share buffer\n");
		ret = -ENOMEM;
		goto release_dev_mem;
	}

	init_waitqueue_head(&scp->run.wq);
	init_waitqueue_head(&scp->ack_wq);

	scp_add_rpmsg_subdev(scp);

	ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
					scp_irq_handler, IRQF_ONESHOT,
					pdev->name, scp);

	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto remove_subdev;
	}

	return scp;

remove_subdev:
	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	kfree(scp->share_buf);
	scp->share_buf = NULL;
release_dev_mem:
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);

	return ERR_PTR(ret);
}

static void scp_free(struct mtk_scp *scp)
{
	int i;

	scp_remove_rpmsg_subdev(scp);
	scp_ipi_unregister(scp, SCP_IPI_INIT);
	kfree(scp->share_buf);
	scp->share_buf = NULL;
	scp_unmap_memory_region(scp);
	for (i = 0; i < SCP_IPI_MAX; i++)
		mutex_destroy(&scp->ipi_desc[i].lock);
	mutex_destroy(&scp->send_lock);
}

static int scp_add_single_core(struct platform_device *pdev,
			       struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	struct mtk_scp *scp;
	int ret;

	scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev), -1);
	if (IS_ERR(scp))
		return PTR_ERR(scp);

	ret = rproc_add(scp->rproc);
	if (ret) {
		dev_err(dev, "Failed to add rproc\n");
		scp_free(scp);
		return ret;
	}

	list_add_tail(&scp->elem, scp_list);

	return 0;
}

static int scp_add_multi_core(struct platform_device *pdev,
			      struct mtk_scp_of_cluster *scp_cluster)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct platform_device *cpdev;
	struct device_node *child;
	struct list_head *scp_list = &scp_cluster->mtk_scp_list;
	const struct mtk_scp_of_data **cluster_of_data;
	struct mtk_scp *scp, *temp;
	int core_id = 0;
	int ret;

	cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);

	for_each_available_child_of_node(np, child) {
		if (!cluster_of_data[core_id]) {
			ret = -EINVAL;
			dev_err(dev, "Unsupported core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "No platform device found for core %d\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id], core_id);
		put_device(&cpdev->dev);
		if (IS_ERR(scp)) {
			ret = PTR_ERR(scp);
			dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
			of_node_put(child);
			goto init_fail;
		}

		ret = rproc_add(scp->rproc);
		if (ret) {
			dev_err(dev, "Failed to add rproc of core %d\n", core_id);
			of_node_put(child);
			scp_free(scp);
			goto init_fail;
		}

		list_add_tail(&scp->elem, scp_list);
		core_id++;
	}

	/*
	 * Set the drvdata of @pdev to the last @scp that was created. This is
	 * needed because (1) scp_rproc_init() calls platform_set_drvdata() on
	 * the child platform devices and (2) we need a handle to the cluster
	 * list in scp_remove().
	 */
	platform_set_drvdata(pdev, scp);

	return 0;

init_fail:
	list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}

	return ret;
}

static bool scp_is_single_core(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev_of_node(dev);
	struct device_node *child;
	int num_cores = 0;

	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "mediatek,scp-core"))
			num_cores++;

	return num_cores < 2;
}
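
/*
 * A minimal sketch of the DT layout this checks for; every name other
 * than the "mediatek,scp-core" compatible is illustrative:
 *
 *	scp@10500000 {
 *		compatible = "mediatek,mt8195-scp-dual";
 *		...
 *		scp@0 { compatible = "mediatek,scp-core"; ... };
 *		scp@a0000 { compatible = "mediatek,scp-core"; ... };
 *	};
 */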

static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
{
	int ret;

	if (scp_is_single_core(pdev))
		ret = scp_add_single_core(pdev, scp_cluster);
	else
		ret = scp_add_multi_core(pdev, scp_cluster);

	return ret;
}

static const struct of_device_id scp_core_match[] = {
	{ .compatible = "mediatek,scp-core" },
	{}
};

static int scp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_scp_of_cluster *scp_cluster;
	struct resource *res;
	int ret;

	scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
	if (!scp_cluster)
		return -ENOMEM;

	scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(scp_cluster->reg_base))
		return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
				     "Failed to parse and map cfg memory\n");

	/* l1tcm is an optional memory region */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
	if (res) {
		scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(scp_cluster->l1tcm_base))
			return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base),
					     "Failed to map l1tcm memory\n");

		scp_cluster->l1tcm_size = resource_size(res);
		scp_cluster->l1tcm_phys = res->start;
	}

	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
	mutex_init(&scp_cluster->cluster_lock);

	ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");

	ret = scp_cluster_init(pdev, scp_cluster);
	if (ret) {
		of_platform_depopulate(dev);
		return ret;
	}

	return 0;
}

static void scp_remove(struct platform_device *pdev)
{
	struct mtk_scp *scp = platform_get_drvdata(pdev);
	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
	struct mtk_scp *temp;

	list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
		list_del(&scp->elem);
		rproc_del(scp->rproc);
		scp_free(scp);
	}
	of_platform_depopulate(&pdev->dev);
	mutex_destroy(&scp_cluster->cluster_lock);
}

static const struct mtk_scp_sizes_data default_scp_sizes = {
	.max_dram_size = 0x500000,
	.ipi_share_buffer_size = 288,
};

static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
	.max_dram_size = 0x800000,
	.ipi_share_buffer_size = 600,
};

static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = {
	.max_dram_size = 0xA00000,
	.ipi_share_buffer_size = 600,
};

static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
	.max_dram_size = 0x800000,
	.ipi_share_buffer_size = 288,
};

static const struct mtk_scp_of_data mt8183_of_data = {
	.scp_clk_get = mt8183_scp_clk_get,
	.scp_before_load = mt8183_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x7bdb0,
	.scp_sizes = &default_scp_sizes,
};

static const struct mtk_scp_of_data mt8186_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8186_scp_before_load,
	.scp_irq_handler = mt8183_scp_irq_handler,
	.scp_reset_assert = mt8183_scp_reset_assert,
	.scp_reset_deassert = mt8183_scp_reset_deassert,
	.scp_stop = mt8183_scp_stop,
	.scp_da_to_va = mt8183_scp_da_to_va,
	.host_to_scp_reg = MT8183_HOST_TO_SCP,
	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
	.ipi_buf_offset = 0x3bdb0,
	.scp_sizes = &default_scp_sizes,
};

static const struct mtk_scp_of_data mt8188_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8188_scp_before_load,
	.scp_irq_handler = mt8195_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8188_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
	.scp_sizes = &mt8188_scp_sizes,
};

static const struct mtk_scp_of_data mt8188_of_data_c1 = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8188_scp_c1_before_load,
	.scp_irq_handler = mt8195_scp_c1_irq_handler,
	.scp_reset_assert = mt8195_scp_c1_reset_assert,
	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
	.scp_stop = mt8188_scp_c1_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
	.scp_sizes = &mt8188_scp_c1_sizes,
};

static const struct mtk_scp_of_data mt8192_of_data = {
	.scp_clk_get = mt8192_scp_clk_get,
	.scp_before_load = mt8192_scp_before_load,
	.scp_irq_handler = mt8192_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8192_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
	.scp_sizes = &default_scp_sizes,
};

static const struct mtk_scp_of_data mt8195_of_data = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_before_load,
	.scp_irq_handler = mt8195_scp_irq_handler,
	.scp_reset_assert = mt8192_scp_reset_assert,
	.scp_reset_deassert = mt8192_scp_reset_deassert,
	.scp_stop = mt8195_scp_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
	.scp_sizes = &mt8195_scp_sizes,
};

static const struct mtk_scp_of_data mt8195_of_data_c1 = {
	.scp_clk_get = mt8195_scp_clk_get,
	.scp_before_load = mt8195_scp_c1_before_load,
	.scp_irq_handler = mt8195_scp_c1_irq_handler,
	.scp_reset_assert = mt8195_scp_c1_reset_assert,
	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
	.scp_stop = mt8195_scp_c1_stop,
	.scp_da_to_va = mt8192_scp_da_to_va,
	.host_to_scp_reg = MT8192_GIPC_IN_SET,
	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
	.scp_sizes = &default_scp_sizes,
};

static const struct mtk_scp_of_data *mt8188_of_data_cores[] = {
	&mt8188_of_data,
	&mt8188_of_data_c1,
	NULL
};

static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
	&mt8195_of_data,
	&mt8195_of_data_c1,
	NULL
};

static const struct of_device_id mtk_scp_of_match[] = {
	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
	{ .compatible = "mediatek,mt8188-scp-dual", .data = &mt8188_of_data_cores },
	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);

static struct platform_driver mtk_scp_driver = {
	.probe = scp_probe,
	.remove = scp_remove,
	.driver = {
		.name = "mtk-scp",
		.of_match_table = mtk_scp_of_match,
	},
};

module_platform_driver(mtk_scp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SCP control driver");