// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");

static struct acp_quirk_entry quirk_valve_galileo = {
	.signed_fw_image = true,
	.skip_iram_dram_size_mod = true,
};

const struct dmi_system_id acp_sof_quirk_table[] = {
	{
		/* Steam Deck OLED device */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
		},
		.driver_data = &quirk_valve_galileo,
	},
	{}
};
EXPORT_SYMBOL_GPL(acp_sof_quirk_table);

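/*
 * SMN (System Management Network) register access helpers. The host
 * bridge exposes an index/data pair in its PCI config space: the SMN
 * address is written to config offset 0x60 and the data is then
 * written to or read from config offset 0x64.
 */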
static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_write_config_dword(dev, 0x64, data);

	return 0;
}

static int smn_read(struct pci_dev *dev, u32 smn_addr)
{
	u32 data = 0;

	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_read_config_dword(dev, 0x64, &data);

	return data;
}

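/*
 * init_dma_descriptor - program the base address of the DMA descriptor
 * array (the dma_desc field of struct scratch_reg_conf at the debug box
 * offset in ACP SRAM) and the maximum descriptor count.
 */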
static void init_dma_descriptor(struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int addr;

	addr = desc->sram_pte_offset + sdev->debug_box.offset +
	       offsetof(struct scratch_reg_conf, dma_desc);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}

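/*
 * configure_dma_descriptor - write one DMA descriptor (source address,
 * destination address and transfer count) into the descriptor array in
 * the ACP scratch registers.
 * @idx: index of the descriptor within the array
 */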
static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
				     struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int offset;

	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
		offsetof(struct scratch_reg_conf, dma_desc) +
		idx * sizeof(struct dma_descriptor);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

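/*
 * config_dma_channel - reset a DMA channel, program its descriptor count,
 * start index and priority, and then start it.
 * @ch: DMA channel number
 * @idx: start index in the descriptor array
 * @dscr_count: number of descriptors to process
 */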
static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, status;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS: 0x%x ACP_ERROR_STATUS: 0x%x\n", val, status);
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}

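/*
 * acpbus_dma_start - load the descriptor array into the ACP scratch
 * registers and kick off the DMA transfer on the given channel.
 */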
static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u16 dscr;

	if (!dscr_info || !dscr_count)
		return -EINVAL;

	for (dscr = 0; dscr < dscr_count; dscr++)
		configure_dma_descriptor(adata, dscr, dscr_info++);

	ret = config_dma_channel(adata, ch, 0, dscr_count);
	if (ret < 0)
		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

	return ret;
}

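/*
 * configure_and_run_dma - split a transfer into ACP_PAGE_SIZE sized
 * descriptors, run it on DMA channel 0 and clear the descriptor array
 * once the transfer has been started.
 */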
int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}

/*
 * psp_mbox_ready - poll the ready bit of the PSP mailbox
 * @adata: acp device data
 * @ack: true when waiting for the PSP acknowledgement of a command,
 *	 false when checking that the mailbox is free for a new command
 */

static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	ret = read_poll_timeout(smn_read, data, data & MBOX_READY_MASK, MBOX_DELAY_US,
				ACP_PSP_TIMEOUT_US, false, adata->smn_dev, MP0_C2PMSG_114_REG);
	if (!ret)
		return 0;

	dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);

	if (ack)
		return -ETIMEDOUT;

	return -EBUSY;
}

/*
 * psp_send_cmd - send a PSP command over the mailbox
 * @adata: acp device data
 * @cmd: non-zero PSP command value
 */

static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	if (!cmd)
		return -EINVAL;

	/* Get a non-zero Doorbell value from PSP */
	ret = read_poll_timeout(smn_read, data, data, MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false,
				adata->smn_dev, MP0_C2PMSG_73_REG);

	if (ret) {
		dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
		return ret;
	}

	/* Check if PSP is ready for new command */
	ret = psp_mbox_ready(adata, false);
	if (ret)
		return ret;

	smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);

	/* Ring the Doorbell for PSP */
	smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);

	/* Check MBOX ready as PSP ack */
	ret = psp_mbox_ready(adata, true);

	return ret;
}

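/*
 * configure_and_run_sha_dma - copy the firmware image to ACP memory using
 * the SHA DMA engine and wait for the PSP to validate it.
 * @image_addr: host address of the firmware image
 * @start_addr: SHA DMA source address
 * @dest_addr: SHA DMA destination address
 * @image_length: firmware image length in bytes
 */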
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
			      unsigned int start_addr, unsigned int dest_addr,
			      unsigned int image_length)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int tx_count, fw_qualifier, val;
	int ret;

	if (!image_addr) {
		dev_err(sdev->dev, "SHA DMA image address is NULL\n");
		return -EINVAL;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
	if (val & ACP_SHA_RUN) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
						    val, val & ACP_SHA_RESET,
						    ACP_REG_POLL_INTERVAL,
						    ACP_REG_POLL_TIMEOUT_US);
		if (ret < 0) {
			dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
			return ret;
		}
	}

	if (adata->quirks && adata->quirks->signed_fw_image)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);

	/* psp_send_cmd only required for vangogh platform (rev 5) */
	if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
		/* Modify IRAM and DRAM size */
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
		if (ret)
			return ret;
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
		if (ret)
			return ret;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
					    tx_count, tx_count == image_length,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
		return ret;
	}

	/* psp_send_cmd only required for renoir platform (rev 3) */
	if (desc->rev == 3) {
		ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
		if (ret)
			return ret;
	}

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "PSP validation failed\n");
		return ret;
	}

	return 0;
}

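/*
 * acp_dma_status - wait for a running DMA channel to complete.
 * @ch: DMA channel number
 */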
int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	int ret = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}

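/*
 * memcpy_from_scratch / memcpy_to_scratch - copy data between host memory
 * and the ACP scratch registers, one 32-bit word at a time.
 */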
void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i += 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i += 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

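/*
 * acp_memory_init - enable the DSP software interrupt and set up the DMA
 * descriptor base in ACP SRAM.
 */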
static int acp_memory_init(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
	init_dma_descriptor(adata);

	return 0;
}

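/*
 * acp_irq_thread - threaded IRQ handler: take the hardware semaphore,
 * run the IPC irq_thread handler and release the semaphore again.
 */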
static irqreturn_t acp_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int count = ACP_HW_SEM_RETRY_COUNT;

	spin_lock_irq(&sdev->ipc_lock);
	/* Wait until the HW semaphore lock is acquired or the retry count expires */
	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset) && --count)
		;
	spin_unlock_irq(&sdev->ipc_lock);

	if (!count) {
		dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
		return IRQ_NONE;
	}

	sof_ops(sdev)->irq_thread(irq, sdev);
	/* Release the HW semaphore */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);

	return IRQ_HANDLED;
}

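/*
 * acp_irq_handler - hard IRQ handler: acknowledge DSP-to-host IPC
 * interrupts (waking the IRQ thread), SoundWire manager interrupts and
 * ACP error interrupts.
 */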
static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct amd_sdw_manager *amd_manager;
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;
	int irq_flag = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val & ACP_DSP_TO_HOST_IRQ) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
				  ACP_DSP_TO_HOST_IRQ);
		return IRQ_WAKE_THREAD;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SDW0_IRQ_MASK) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_SDW0_IRQ_MASK);
		if (amd_manager)
			schedule_work(&amd_manager->amd_sdw_irq_thread);
		irq_flag = 1;
	}

	if (val & ACP_ERROR_IRQ_MASK) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
		/* ACP_SW1_I2S_ERROR_REASON was added from the rmb platform (rev 6) onwards */
		if (desc->rev >= 6)
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
		irq_flag = 1;
	}

	if (desc->ext_intr_stat1) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
		if (val & ACP_SDW1_IRQ_MASK) {
			amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
					  ACP_SDW1_IRQ_MASK);
			if (amd_manager)
				schedule_work(&amd_manager->amd_sdw_irq_thread);
			irq_flag = 1;
		}
	}

	if (irq_flag)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

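/*
 * acp_power_on - power up the ACP power gating FSM and wait until it
 * reports the powered-on state.
 */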
static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	if (val & ACP_PGFSM_STATUS_MASK)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  ACP_PGFSM_CNTL_POWER_ON_MASK);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}

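/*
 * acp_reset - assert and release the full ACP soft reset, then reprogram
 * the ACP clock mux and re-enable the external error interrupts.
 */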
static int acp_reset(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	if (desc->acp_clkmux_sel)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	if (desc->ext_intr_enb)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);

	if (desc->ext_intr_cntl)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_cntl, ACP_ERROR_IRQ_MASK);

	return ret;
}

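/*
 * acp_dsp_reset - assert and release the DSP-only soft reset, leaving the
 * rest of the ACP IP registers untouched.
 */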
static int acp_dsp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_DSP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}

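/*
 * acp_init - power on the ACP block, enable it via the control register
 * and run the reset sequence.
 */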
static int acp_init(struct snd_sof_dev *sdev)
{
	int ret;

	/* power on */
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	return acp_reset(sdev);
}

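/*
 * check_acp_sdw_enable_status - return true if either SoundWire manager
 * (SW0 or SW1) is currently enabled; the result is cached in sdw_en_stat
 * for use on resume.
 */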
static bool check_acp_sdw_enable_status(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	u32 sdw0_en, sdw1_en;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw)
		return false;

	sdw0_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW0_EN);
	sdw1_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW1_EN);
	acp_data->sdw_en_stat = sdw0_en || sdw1_en;
	return acp_data->sdw_en_stat;
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	int ret;

	/*
	 * acp_reset() applies both an ACP soft reset and a DSP reset. The ACP
	 * soft reset sequence resets all ACP IP registers to their default
	 * values, which would break SoundWire ClockStop mode. Therefore apply
	 * only the DSP reset when a SoundWire manager is enabled (ClockStop
	 * mode); in all other cases apply the full ACP reset sequence.
	 */
	if (check_acp_sdw_enable_status(sdev))
		return acp_dsp_reset(sdev);

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);

	return 0;
}

EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	int ret;
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw_en_stat) {
		ret = acp_init(sdev);
		if (ret) {
			dev_err(sdev->dev, "ACP Init failed\n");
			return ret;
		}
		return acp_memory_init(sdev);
	} else {
		return acp_dsp_reset(sdev);
	}
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE)
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	struct acpi_device *sdw_dev;
	struct acp_dev_data *acp_data;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	if (!addr)
		return -ENODEV;

	acp_data = sdev->pdata->hw_pdata;
	sdw_dev = acpi_find_child_device(ACPI_COMPANION(sdev->dev), addr, 0);
	if (!sdw_dev)
		return -ENODEV;

	acp_data->info.handle = sdw_dev->handle;
	acp_data->info.count = desc->sdw_max_link_count;

	return amd_sdw_scan_controller(&acp_data->info);
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	struct sdw_amd_res sdw_res;
	int ret;

	acp_data = sdev->pdata->hw_pdata;

	memset(&sdw_res, 0, sizeof(sdw_res));
	sdw_res.addr = acp_data->addr;
	sdw_res.reg_range = acp_data->reg_range;
	sdw_res.handle = acp_data->info.handle;
	sdw_res.parent = sdev->dev;
	sdw_res.dev = sdev->dev;
	sdw_res.acp_lock = &acp_data->acp_lock;
	sdw_res.count = acp_data->info.count;
	sdw_res.link_mask = acp_data->info.link_mask;
	sdw_res.mmio_base = sdev->bar[ACP_DSP_BAR];

	ret = sdw_amd_probe(&sdw_res, &acp_data->sdw);
	if (ret)
		dev_err(sdev->dev, "SoundWire probe failed\n");
	return ret;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (acp_data->sdw)
		sdw_amd_exit(acp_data->sdw);
	acp_data->sdw = NULL;

	return 0;
}

#else
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	return 0;
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	return 0;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	return 0;
}
#endif

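/*
 * amd_sof_acp_probe - register the DMIC codec platform device, map the ACP
 * BAR, look up the host bridge used for SMN access, initialize the ACP,
 * request the IPC interrupt, probe the SoundWire managers and set up the
 * IPC mailbox offsets and firmware-name quirks.
 */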
int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	const struct sof_amd_acp_desc *chip;
	const struct dmi_system_id *dmi_id;
	unsigned int addr;
	int ret;

	chip = get_chip_info(sdev->pdata);
	if (!chip) {
		dev_err(sdev->dev, "unsupported device, chip id: 0x%x\n", pci->device);
		return -EIO;
	}

	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
			     GFP_KERNEL);
	if (!adata)
		return -ENOMEM;

	adata->dev = sdev;
	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
							PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(adata->dmic_dev)) {
		dev_err(sdev->dev, "failed to register platform device for dmic codec\n");
		return PTR_ERR(adata->dmic_dev);
	}

	addr = pci_resource_start(pci, ACP_DSP_BAR);
	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
	if (!sdev->bar[ACP_DSP_BAR]) {
		dev_err(sdev->dev, "ioremap error\n");
		ret = -ENXIO;
		goto unregister_dev;
	}

	pci_set_master(pci);
	adata->addr = addr;
	adata->reg_range = chip->reg_end_addr - chip->reg_start_addr;
	mutex_init(&adata->acp_lock);
	sdev->pdata->hw_pdata = adata;
	adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
	if (!adata->smn_dev) {
		dev_err(sdev->dev, "Failed to get host bridge device\n");
		ret = -ENODEV;
		goto unregister_dev;
	}

	ret = acp_init(sdev);
	if (ret < 0)
		goto free_smn_dev;

	sdev->ipc_irq = pci->irq;
	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
				   IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to register IRQ %d\n",
			sdev->ipc_irq);
		goto free_smn_dev;
	}

	/* scan SoundWire capabilities exposed by DSDT */
	ret = acp_sof_scan_sdw_devices(sdev, chip->sdw_acpi_dev_addr);
	if (ret < 0) {
		dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
		goto skip_soundwire;
	}

	ret = amd_sof_sdw_probe(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "SoundWire probe failed\n");
		goto free_ipc_irq;
	}

skip_soundwire:
	sdev->dsp_box.offset = 0;
	sdev->dsp_box.size = BOX_SIZE_512;

	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
	sdev->host_box.size = BOX_SIZE_512;

	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
	sdev->debug_box.size = BOX_SIZE_1024;

	dmi_id = dmi_first_match(acp_sof_quirk_table);
	if (dmi_id) {
		adata->quirks = dmi_id->driver_data;

		if (adata->quirks->signed_fw_image) {
			adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-code.bin",
							    chip->name);
			if (!adata->fw_code_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}

			adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-data.bin",
							    chip->name);
			if (!adata->fw_data_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}
		}
	}

	adata->enable_fw_debug = enable_fw_debug;
	acp_memory_init(sdev);

	acp_dsp_stream_init(sdev);

	return 0;

free_ipc_irq:
	free_irq(sdev->ipc_irq, sdev);
free_smn_dev:
	pci_dev_put(adata->smn_dev);
unregister_dev:
	platform_device_unregister(adata->dmic_dev);
	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);


void amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->smn_dev)
		pci_dev_put(adata->smn_dev);

	if (adata->sdw)
		amd_sof_sdw_exit(sdev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("AMD ACP SOF driver");
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
MODULE_IMPORT_NS(SND_AMD_SOUNDWIRE_ACPI);