// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for the ACP DSP firmware binary loader
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp-dsp-offset.h"
#include "acp.h"

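/*
 * Image types staged by acp_dsp_block_write() and mapped through the ACP
 * ATU in configure_pte_for_fw_loading(): code (FW_BIN), data (FW_DATA_BIN)
 * and SRAM data (FW_SRAM_DATA_BIN).
 */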
#define FW_BIN			0
#define FW_DATA_BIN		1
#define FW_SRAM_DATA_BIN	2

#define FW_BIN_PTE_OFFSET	0x00
#define FW_DATA_BIN_PTE_OFFSET	0x08

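/* Value written to the run-stall registers to let the DSP cores execute */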
#define ACP_DSP_RUN	0x00

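/*
 * Read back a firmware block. Only SRAM blocks are supported; the offset is
 * rebased against the chip's sram_pte_offset before copying from the scratch
 * memory window.
 */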
int acp_dsp_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
		       u32 offset, void *dest, size_t size)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	switch (blk_type) {
	case SOF_FW_BLK_TYPE_SRAM:
		offset = offset - desc->sram_pte_offset;
		memcpy_from_scratch(sdev, offset, dest, size);
		break;
	default:
		dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_read, SND_SOC_SOF_AMD_COMMON);

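/*
 * Stage a firmware block in a DMA-coherent bounce buffer. IRAM, DRAM and
 * SRAM blocks each get their own buffer; the actual transfer to the DSP
 * memories happens later in acp_dsp_pre_fw_run().
 */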
int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
			u32 offset, void *src, size_t size)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	void *dest;
	u32 dma_size, page_count;
	unsigned int size_fw;

	adata = sdev->pdata->hw_pdata;

	switch (blk_type) {
	case SOF_FW_BLK_TYPE_IRAM:
		if (!adata->bin_buf) {
			size_fw = sdev->basefw.fw->size;
			page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
			dma_size = page_count * ACP_PAGE_SIZE;
			adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size,
							    &adata->sha_dma_addr,
							    GFP_ATOMIC);
			if (!adata->bin_buf)
				return -ENOMEM;
		}
		adata->fw_bin_size = size + offset;
		dest = adata->bin_buf + offset;
		break;
	case SOF_FW_BLK_TYPE_DRAM:
		if (!adata->data_buf) {
			adata->data_buf = dma_alloc_coherent(&pci->dev,
							     ACP_DEFAULT_DRAM_LENGTH,
							     &adata->dma_addr,
							     GFP_ATOMIC);
			if (!adata->data_buf)
				return -ENOMEM;
		}
		dest = adata->data_buf + offset;
		adata->fw_data_bin_size = size + offset;
		adata->is_dram_in_use = true;
		break;
	case SOF_FW_BLK_TYPE_SRAM:
		if (!adata->sram_data_buf) {
			adata->sram_data_buf = dma_alloc_coherent(&pci->dev,
								  ACP_DEFAULT_SRAM_LENGTH,
								  &adata->sram_dma_addr,
								  GFP_ATOMIC);
			if (!adata->sram_data_buf)
				return -ENOMEM;
		}
		adata->fw_sram_data_bin_size = size + offset;
		dest = adata->sram_data_buf + offset;
		adata->is_sram_in_use = true;
		break;
	default:
		dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
		return -EINVAL;
	}

	memcpy(dest, src, size);
	return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_write, SND_SOC_SOF_AMD_COMMON);

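/* SOF window types map 1:1 onto ACP BAR indices, so return the type as-is */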
int acp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	return type;
}
EXPORT_SYMBOL_NS(acp_get_bar_index, SND_SOC_SOF_AMD_COMMON);

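/*
 * Program page-table entries into the ACP scratch registers so the ATU can
 * translate the DSP-visible system-memory window onto the host DMA buffer
 * holding the given image type. The PTE offset places the code, data and
 * SRAM-data images back to back in the table.
 */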
static void configure_pte_for_fw_loading(int type, int num_pages, struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int low, high;
	dma_addr_t addr;
	u16 page_idx;
	u32 offset;

	switch (type) {
	case FW_BIN:
		offset = FW_BIN_PTE_OFFSET;
		addr = adata->sha_dma_addr;
		break;
	case FW_DATA_BIN:
		offset = adata->fw_bin_page_count * 8;
		addr = adata->dma_addr;
		break;
	case FW_SRAM_DATA_BIN:
		offset = (adata->fw_bin_page_count + ACP_DRAM_PAGE_COUNT) * 8;
		addr = adata->sram_dma_addr;
		break;
	default:
		dev_err(sdev->dev, "Invalid data type %x\n", type);
		return;
	}

	/* Group Enable */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_BASE_ADDR_GRP_1,
			  desc->sram_pte_offset | BIT(31));
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1,
			  PAGE_SIZE_4K_ENABLE);

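	/*
	 * Each PTE is two 32-bit scratch-register words: the low half of the
	 * DMA address, then the high half with BIT(31) set (which appears to
	 * act as a valid/enable flag for the entry).
	 */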
	for (page_idx = 0; page_idx < num_pages; page_idx++) {
		low = lower_32_bits(addr);
		high = upper_32_bits(addr);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
		high |= BIT(31);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
		offset += 8;
		addr += PAGE_SIZE;
	}

	/* Flush ATU Cache after PTE Update */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
}

/* Pre-firmware-run operations: map the staged images and DMA them to the DSP memories */
int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata;
	unsigned int src_addr, size_fw, dest_addr;
	u32 page_count, dma_size;
	int ret;

	adata = sdev->pdata->hw_pdata;

	if (adata->signed_fw_image)
		size_fw = adata->fw_bin_size - ACP_FIRMWARE_SIGNATURE;
	else
		size_fw = adata->fw_bin_size;

	page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
	adata->fw_bin_page_count = page_count;

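	/* Map the code image and stream it into IRAM through the SHA DMA engine */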
	configure_pte_for_fw_loading(FW_BIN, page_count, adata);
	ret = configure_and_run_sha_dma(adata, adata->bin_buf, ACP_SYSTEM_MEMORY_WINDOW,
					ACP_IRAM_BASE_ADDRESS, size_fw);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA transfer failed status: %d\n", ret);
		return ret;
	}
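
	/* Data image, if staged: map it after the code pages and DMA it into DRAM */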
	if (adata->is_dram_in_use) {
		configure_pte_for_fw_loading(FW_DATA_BIN, ACP_DRAM_PAGE_COUNT, adata);
		src_addr = ACP_SYSTEM_MEMORY_WINDOW + (page_count * ACP_PAGE_SIZE);
		dest_addr = ACP_DRAM_BASE_ADDRESS;

		ret = configure_and_run_dma(adata, src_addr, dest_addr, adata->fw_data_bin_size);
		if (ret < 0) {
			dev_err(sdev->dev, "acp dma configuration failed: %d\n", ret);
			return ret;
		}
		ret = acp_dma_status(adata, 0);
		if (ret < 0)
			dev_err(sdev->dev, "acp dma transfer status: %d\n", ret);
	}
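
	/* SRAM data image, if staged: map it after the DRAM pages and DMA it into SRAM */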
	if (adata->is_sram_in_use) {
		configure_pte_for_fw_loading(FW_SRAM_DATA_BIN, ACP_SRAM_PAGE_COUNT, adata);
		src_addr = ACP_SYSTEM_MEMORY_WINDOW + ACP_DEFAULT_SRAM_LENGTH +
			   (page_count * ACP_PAGE_SIZE);
		dest_addr = ACP_SRAM_BASE_ADDRESS;

		ret = configure_and_run_dma(adata, src_addr, dest_addr,
					    adata->fw_sram_data_bin_size);
		if (ret < 0) {
			dev_err(sdev->dev, "acp dma configuration failed: %d\n", ret);
			return ret;
		}
		ret = acp_dma_status(adata, 0);
		if (ret < 0)
			dev_err(sdev->dev, "acp dma transfer status: %d\n", ret);
	}

	if (desc->rev > 3) {
		/* Cache Window enable */
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_OFFSET0, desc->sram_pte_offset);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_SIZE0, SRAM1_SIZE | BIT(31));
	}

	/* Free memory once DMA is complete */
	dma_size = (PAGE_ALIGN(sdev->basefw.fw->size) >> PAGE_SHIFT) * ACP_PAGE_SIZE;
	dma_free_coherent(&pci->dev, dma_size, adata->bin_buf, adata->sha_dma_addr);
	adata->bin_buf = NULL;
	if (adata->is_dram_in_use) {
		dma_free_coherent(&pci->dev, ACP_DEFAULT_DRAM_LENGTH, adata->data_buf,
				  adata->dma_addr);
		adata->data_buf = NULL;
	}
	if (adata->is_sram_in_use) {
		dma_free_coherent(&pci->dev, ACP_DEFAULT_SRAM_LENGTH, adata->sram_data_buf,
				  adata->sram_dma_addr);
		adata->sram_data_buf = NULL;
	}
	return ret;
}
EXPORT_SYMBOL_NS(acp_dsp_pre_fw_run, SND_SOC_SOF_AMD_COMMON);

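/*
 * Release the DSP core from run-stall so the loaded firmware starts, and do
 * the same for the fusion DSP when the platform provides one.
 */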
int acp_sof_dsp_run(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	int val;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL, ACP_DSP_RUN);
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL);
	dev_dbg(sdev->dev, "ACP_DSP0_RUNSTALL : 0x%0x\n", val);

	/* Platforms without a fusion DSP keep fusion_dsp_offset at zero */
	if (desc->fusion_dsp_offset && adata->enable_fw_debug) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset, ACP_DSP_RUN);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset);
		dev_dbg(sdev->dev, "ACP_DSP0_FUSION_RUNSTALL : 0x%0x\n", val);
	}
	return 0;
}
EXPORT_SYMBOL_NS(acp_sof_dsp_run, SND_SOC_SOF_AMD_COMMON);

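/*
 * Request the signed firmware code and data binaries named in the platform
 * data and stage them through the IRAM/DRAM block-write paths.
 */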
int acp_sof_load_signed_firmware(struct snd_sof_dev *sdev)
{
	struct snd_sof_pdata *plat_data = sdev->pdata;
	struct acp_dev_data *adata = plat_data->hw_pdata;
	int ret;

	ret = request_firmware(&sdev->basefw.fw, adata->fw_code_bin, sdev->dev);
	if (ret < 0) {
		dev_err(sdev->dev, "sof signed firmware code bin is missing\n");
		return ret;
	}
	dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_code_bin);

	ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_IRAM, 0,
				      (void *)sdev->basefw.fw->data, sdev->basefw.fw->size);
	if (ret < 0) {
		dev_err(sdev->dev, "sof signed firmware code bin write failed: %d\n", ret);
		return ret;
	}

	ret = request_firmware(&adata->fw_dbin, adata->fw_data_bin, sdev->dev);
	if (ret < 0) {
		dev_err(sdev->dev, "sof signed firmware data bin is missing\n");
		return ret;
	}
	dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_data_bin);

	ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_DRAM, 0,
				      (void *)adata->fw_dbin->data, adata->fw_dbin->size);
	return ret;
}
EXPORT_SYMBOL_NS(acp_sof_load_signed_firmware, SND_SOC_SOF_AMD_COMMON);