// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");

static struct acp_quirk_entry quirk_valve_galileo = {
	.signed_fw_image = true,
	.skip_iram_dram_size_mod = true,
};

const struct dmi_system_id acp_sof_quirk_table[] = {
	{
		/* Steam Deck OLED device */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
		},
		.driver_data = &quirk_valve_galileo,
	},
	{}
};
EXPORT_SYMBOL_GPL(acp_sof_quirk_table);

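/*
 * SMN (System Management Network) registers are accessed indirectly through
 * the host bridge: the register address is written to the index register at
 * PCI config offset 0x60 and the data is transferred through the data
 * register at offset 0x64.
 */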
static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_write_config_dword(dev, 0x64, data);

	return 0;
}

static int smn_read(struct pci_dev *dev, u32 smn_addr)
{
	u32 data = 0;

	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_read_config_dword(dev, 0x64, &data);

	return data;
}

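/*
 * The DMA descriptors are kept in the dma_desc member of struct
 * scratch_reg_conf, located in ACP scratch memory at the debug box offset;
 * program their base address and the maximum descriptor count.
 */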
static void init_dma_descriptor(struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int addr;

	addr = desc->sram_pte_offset + sdev->debug_box.offset +
	       offsetof(struct scratch_reg_conf, dma_desc);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}

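/*
 * Each descriptor is three 32-bit words written into scratch memory:
 * source address, destination address and transfer count.
 */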
static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
				     struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int offset;

	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
		offsetof(struct scratch_reg_conf, dma_desc) +
		idx * sizeof(struct dma_descriptor);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, status;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}

static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u16 dscr;

	if (!dscr_info || !dscr_count)
		return -EINVAL;

	for (dscr = 0; dscr < dscr_count; dscr++)
		configure_dma_descriptor(adata, dscr, dscr_info++);

	ret = config_dma_channel(adata, ch, 0, dscr_count);
	if (ret < 0)
		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

	return ret;
}

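/*
 * Split the transfer into ACP_PAGE_SIZE sized descriptors (the last one
 * carries the remainder) and start them on DMA channel 0.
 */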
int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}

/*
 * psp_mbox_ready - poll the ready bit of the PSP mailbox
 * @adata: acp device data
 * @ack: true when polling for the PSP acknowledgement of a command,
 *	 false when checking that the mailbox is ready for a new command
 */
static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	ret = read_poll_timeout(smn_read, data, data & MBOX_READY_MASK, MBOX_DELAY_US,
				ACP_PSP_TIMEOUT_US, false, adata->smn_dev, MP0_C2PMSG_114_REG);
	if (!ret)
		return 0;

	dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);

	if (ack)
		return -ETIMEDOUT;

	return -EBUSY;
}

/*
 * psp_send_cmd - send a command to the PSP over the mailbox
 * @adata: acp device data
 * @cmd: non-zero command value
 */
static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	if (!cmd)
		return -EINVAL;

	/* Get a non-zero Doorbell value from PSP */
	ret = read_poll_timeout(smn_read, data, data, MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false,
				adata->smn_dev, MP0_C2PMSG_73_REG);
	if (ret) {
		dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
		return ret;
	}

	/* Check if PSP is ready for new command */
	ret = psp_mbox_ready(adata, false);
	if (ret)
		return ret;

	smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);

	/* Ring the Doorbell for PSP */
	smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);

	/* Check MBOX ready as PSP ack */
	ret = psp_mbox_ready(adata, true);

	return ret;
}

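/*
 * Copy the firmware image to the DSP through the ACP SHA DMA engine. The
 * transfer is hashed as it is copied; once the full image length has been
 * transferred, the image must pass PSP validation (DSP_FW_RUN_ENABLE set in
 * ACP_SHA_DSP_FW_QUALIFIER) before the DSP is allowed to run it.
 */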
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
			      unsigned int start_addr, unsigned int dest_addr,
			      unsigned int image_length)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int tx_count, fw_qualifier, val;
	int ret;

	if (!image_addr) {
		dev_err(sdev->dev, "SHA DMA image address is NULL\n");
		return -EINVAL;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
	if (val & ACP_SHA_RUN) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
						    val, val & ACP_SHA_RESET,
						    ACP_REG_POLL_INTERVAL,
						    ACP_REG_POLL_TIMEOUT_US);
		if (ret < 0) {
			dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
			return ret;
		}
	}

	if (adata->quirks && adata->quirks->signed_fw_image)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);

	/* psp_send_cmd is only required for the Vangogh platform (rev 5) */
	if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
		/* Modify IRAM and DRAM size */
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
		if (ret)
			return ret;
		ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
		if (ret)
			return ret;
	}
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
					    tx_count, tx_count == image_length,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
		return ret;
	}

	/* psp_send_cmd is only required for the Renoir platform (rev 3) */
	if (desc->rev == 3) {
		ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
		if (ret)
			return ret;
	}

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "PSP validation failed\n");
		return ret;
	}

	return 0;
}

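/* If the DMA channel is still running, wait for its status bit to clear */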
int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	int ret = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}

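/*
 * The ACP scratch registers are 32 bits wide: copy one register (4 bytes)
 * per iteration; callers are expected to pass a dword-aligned byte count.
 */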
void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

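/* Enable the DSP software interrupt and set up the DMA descriptor area */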
static int acp_memory_init(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
	init_dma_descriptor(adata);

	return 0;
}

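/*
 * Threaded IRQ handler: take the hardware semaphore that guards the IPC
 * mailbox against concurrent access by the DSP, let the core SOF IRQ thread
 * process the message, then release the semaphore.
 */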
static irqreturn_t acp_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int count = ACP_HW_SEM_RETRY_COUNT;

	spin_lock_irq(&sdev->ipc_lock);
	/* Wait until the HW semaphore lock is acquired or the retry count expires */
	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset) && --count)
		;
	spin_unlock_irq(&sdev->ipc_lock);

	if (!count) {
		dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
		return IRQ_NONE;
	}

	sof_ops(sdev)->irq_thread(irq, sdev);
	/* Unlock/release the HW semaphore */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);

	return IRQ_HANDLED;
}

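/*
 * Hard IRQ handler: a DSP-to-host interrupt is acknowledged and handed off to
 * the threaded handler; SoundWire manager and ACP error interrupts are
 * acknowledged and handled here directly.
 */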
static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct amd_sdw_manager *amd_manager;
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;
	int irq_flag = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val & ACP_DSP_TO_HOST_IRQ) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
				  ACP_DSP_TO_HOST_IRQ);
		return IRQ_WAKE_THREAD;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SDW0_IRQ_MASK) {
		amd_manager = dev_get_drvdata(&adata->sdw->pdev[0]->dev);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_SDW0_IRQ_MASK);
		if (amd_manager)
			schedule_work(&amd_manager->amd_sdw_irq_thread);
		irq_flag = 1;
	}

	if (val & ACP_ERROR_IRQ_MASK) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
		/*
		 * The ACP_SW1_I2S_ERROR_REASON register only exists from the
		 * RMB platform (rev 6) onwards
		 */
		if (desc->rev >= 6)
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
		irq_flag = 1;
	}

	if (desc->ext_intr_stat1) {
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat1);
		if (val & ACP_SDW1_IRQ_MASK) {
			amd_manager = dev_get_drvdata(&adata->sdw->pdev[1]->dev);
			snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat1,
					  ACP_SDW1_IRQ_MASK);
			if (amd_manager)
				schedule_work(&amd_manager->amd_sdw_irq_thread);
			irq_flag = 1;
		}
	}

	if (irq_flag)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

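/*
 * Power on the ACP power domain through the PGFSM (power gating state
 * machine) registers; the status and control masks differ between the
 * ACP 3.x and ACP 6.x generations.
 */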
static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	switch (desc->rev) {
	case 3:
	case 5:
		acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	case 6:
		acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
		acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
		break;
	default:
		return -EINVAL;
	}

	if (val & acp_pgfsm_status_mask)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  acp_pgfsm_cntl_mask);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}

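/*
 * Assert and release the full ACP soft reset, then reprogram the clock mux
 * and the external interrupt enable/control registers.
 */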
static int acp_reset(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	if (desc->acp_clkmux_sel)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	if (desc->ext_intr_enb)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);

	if (desc->ext_intr_cntl)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_cntl, ACP_ERROR_IRQ_MASK);

	return ret;
}

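/*
 * Assert and release only the DSP portion of the ACP soft reset, leaving the
 * rest of the ACP register state intact.
 */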
static int acp_dsp_reset(struct snd_sof_dev *sdev)
{
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_DSP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_DSP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	return ret;
}

static int acp_init(struct snd_sof_dev *sdev)
{
	int ret;

	/* power on */
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	return acp_reset(sdev);
}

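/* Report whether any SoundWire manager instance (SW0/SW1) is currently enabled */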
static bool check_acp_sdw_enable_status(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	u32 sdw0_en, sdw1_en;

	acp_data = sdev->pdata->hw_pdata;
	if (!acp_data->sdw)
		return false;

	sdw0_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW0_EN);
	sdw1_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SW1_EN);
	acp_data->sdw_en_stat = sdw0_en || sdw1_en;
	return acp_data->sdw_en_stat;
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	int ret;

	/* acp_reset() applies both the ACP soft reset and the DSP reset. The ACP
	 * soft reset sequence resets all ACP IP registers to their default values,
	 * which would break SoundWire ClockStop mode. Therefore apply only the DSP
	 * reset when SoundWire ClockStop mode is selected; for all other scenarios
	 * apply the full ACP reset sequence.
	 */
	if (check_acp_sdw_enable_status(sdev))
		return acp_dsp_reset(sdev);

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);

	return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	int ret;

	acp_data = sdev->pdata->hw_pdata;
	if (acp_data->sdw_en_stat)
		return acp_dsp_reset(sdev);

	ret = acp_init(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Init failed\n");
		return ret;
	}

	return acp_memory_init(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);

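/*
 * SoundWire support: with CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE enabled, the ACPI
 * namespace is scanned for a SoundWire controller and the AMD SoundWire
 * manager is probed; otherwise the stubs below turn the SoundWire handling
 * into no-ops.
 */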
#if IS_ENABLED(CONFIG_SND_SOC_SOF_AMD_SOUNDWIRE)
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	struct acpi_device *sdw_dev;
	struct acp_dev_data *acp_data;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	if (!addr)
		return -ENODEV;

	acp_data = sdev->pdata->hw_pdata;
	sdw_dev = acpi_find_child_device(ACPI_COMPANION(sdev->dev), addr, 0);
	if (!sdw_dev)
		return -ENODEV;

	acp_data->info.handle = sdw_dev->handle;
	acp_data->info.count = desc->sdw_max_link_count;

	return amd_sdw_scan_controller(&acp_data->info);
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;
	struct sdw_amd_res sdw_res;
	int ret;

	acp_data = sdev->pdata->hw_pdata;

	memset(&sdw_res, 0, sizeof(sdw_res));
	sdw_res.addr = acp_data->addr;
	sdw_res.reg_range = acp_data->reg_range;
	sdw_res.handle = acp_data->info.handle;
	sdw_res.parent = sdev->dev;
	sdw_res.dev = sdev->dev;
	sdw_res.acp_lock = &acp_data->acp_lock;
	sdw_res.count = acp_data->info.count;
	sdw_res.link_mask = acp_data->info.link_mask;
	sdw_res.mmio_base = sdev->bar[ACP_DSP_BAR];

	ret = sdw_amd_probe(&sdw_res, &acp_data->sdw);
	if (ret)
		dev_err(sdev->dev, "SoundWire probe failed\n");
	return ret;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *acp_data;

	acp_data = sdev->pdata->hw_pdata;
	if (acp_data->sdw)
		sdw_amd_exit(acp_data->sdw);
	acp_data->sdw = NULL;

	return 0;
}

#else
static int acp_sof_scan_sdw_devices(struct snd_sof_dev *sdev, u64 addr)
{
	return 0;
}

static int amd_sof_sdw_probe(struct snd_sof_dev *sdev)
{
	return 0;
}

static int amd_sof_sdw_exit(struct snd_sof_dev *sdev)
{
	return 0;
}
#endif

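/*
 * Common probe path for the AMD ACP SOF platforms: map the ACP BAR, power up
 * and reset the ACP, request the shared IPC interrupt, optionally set up
 * SoundWire, lay out the DSP/host/debug mailboxes in scratch memory and apply
 * DMI quirks (e.g. signed firmware images on the Steam Deck OLED).
 */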
int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	const struct sof_amd_acp_desc *chip;
	const struct dmi_system_id *dmi_id;
	unsigned int addr;
	int ret;

	chip = get_chip_info(sdev->pdata);
	if (!chip) {
		dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
		return -EIO;
	}
	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
			     GFP_KERNEL);
	if (!adata)
		return -ENOMEM;

	adata->dev = sdev;
	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
							PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(adata->dmic_dev)) {
		dev_err(sdev->dev, "failed to register platform for dmic codec\n");
		return PTR_ERR(adata->dmic_dev);
	}
	addr = pci_resource_start(pci, ACP_DSP_BAR);
	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
	if (!sdev->bar[ACP_DSP_BAR]) {
		dev_err(sdev->dev, "ioremap error\n");
		ret = -ENXIO;
		goto unregister_dev;
	}

	pci_set_master(pci);
	adata->addr = addr;
	adata->reg_range = chip->reg_end_addr - chip->reg_start_addr;
	mutex_init(&adata->acp_lock);
	sdev->pdata->hw_pdata = adata;
	adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
	if (!adata->smn_dev) {
		dev_err(sdev->dev, "Failed to get host bridge device\n");
		ret = -ENODEV;
		goto unregister_dev;
	}

	ret = acp_init(sdev);
	if (ret < 0)
		goto free_smn_dev;

	sdev->ipc_irq = pci->irq;
	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
				   IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to register IRQ %d\n",
			sdev->ipc_irq);
		goto free_smn_dev;
	}

	/* scan SoundWire capabilities exposed by DSDT */
	ret = acp_sof_scan_sdw_devices(sdev, chip->sdw_acpi_dev_addr);
	if (ret < 0) {
		dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
		goto skip_soundwire;
	}
	ret = amd_sof_sdw_probe(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "error: SoundWire probe error\n");
		goto free_ipc_irq;
	}

skip_soundwire:
	sdev->dsp_box.offset = 0;
	sdev->dsp_box.size = BOX_SIZE_512;

	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
	sdev->host_box.size = BOX_SIZE_512;

	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
	sdev->debug_box.size = BOX_SIZE_1024;

	dmi_id = dmi_first_match(acp_sof_quirk_table);
	if (dmi_id) {
		adata->quirks = dmi_id->driver_data;

		if (adata->quirks->signed_fw_image) {
			adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-code.bin",
							    chip->name);
			if (!adata->fw_code_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}

			adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
							    "sof-%s-data.bin",
							    chip->name);
			if (!adata->fw_data_bin) {
				ret = -ENOMEM;
				goto free_ipc_irq;
			}
		}
	}

	adata->enable_fw_debug = enable_fw_debug;
	acp_memory_init(sdev);

	acp_dsp_stream_init(sdev);

	return 0;

free_ipc_irq:
	free_irq(sdev->ipc_irq, sdev);
free_smn_dev:
	pci_dev_put(adata->smn_dev);
unregister_dev:
	platform_device_unregister(adata->dmic_dev);
	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);

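/* Tear down probe-time resources and run a final ACP soft reset */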
void amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->smn_dev)
		pci_dev_put(adata->smn_dev);

	if (adata->sdw)
		amd_sof_sdw_exit(sdev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
MODULE_IMPORT_NS(SND_AMD_SOUNDWIRE_ACPI);