// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//          Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
        pci_write_config_dword(dev, 0x60, smn_addr);
        pci_write_config_dword(dev, 0x64, data);

        return 0;
}

static int smn_read(struct pci_dev *dev, u32 smn_addr, u32 *data)
{
        pci_write_config_dword(dev, 0x60, smn_addr);
        pci_read_config_dword(dev, 0x64, data);

        return 0;
}

static void init_dma_descriptor(struct acp_dev_data *adata)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int addr;

        addr = ACP_SRAM_PTE_OFFSET + offsetof(struct scratch_reg_conf, dma_desc);

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}

static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
                                     struct dma_descriptor *dscr_info)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int offset;

        offset = ACP_SCRATCH_REG_0 + offsetof(struct scratch_reg_conf, dma_desc) +
                 idx * sizeof(struct dma_descriptor);

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
                              unsigned int idx, unsigned int dscr_count)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int val, status;
        int ret;

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
                          ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

        ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
                                            val & (1 << ch), ACP_REG_POLL_INTERVAL,
                                            ACP_REG_POLL_TIMEOUT_US);
        if (ret < 0) {
                status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
                val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));

                dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
                return ret;
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

        return ret;
}

static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
                            unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
        struct snd_sof_dev *sdev = adata->dev;
        int ret;
        u16 dscr;

        if (!dscr_info || !dscr_count)
                return -EINVAL;

        for (dscr = 0; dscr < dscr_count; dscr++)
                configure_dma_descriptor(adata, dscr, dscr_info++);

        ret = config_dma_channel(adata, ch, 0, dscr_count);
        if (ret < 0)
                dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

        return ret;
}
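
/*
 * Illustrative sketch, not part of the driver flow: a caller such as the
 * firmware loader is expected to kick off a transfer with
 * configure_and_run_dma() and then wait for channel 0 to go idle with
 * acp_dma_status(). The variable names below are assumed for the example only.
 *
 *	ret = configure_and_run_dma(adata, src_offset, dst_offset, size);
 *	if (!ret)
 *		ret = acp_dma_status(adata, 0);
 */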

int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
                          unsigned int dest_addr, int dsp_data_size)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int desc_count, index;
        int ret;

        for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
             desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
                adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
                adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
                adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
                if (dsp_data_size < ACP_PAGE_SIZE)
                        adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
        }

        ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
        if (ret)
                dev_err(sdev->dev, "acpbus_dma_start failed\n");

        /* Clear descriptor array */
        for (index = 0; index < desc_count; index++)
                memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

        return ret;
}

/*
 * psp_mbox_ready - function to poll ready bit of psp mbox
 * @adata: acp device data
 * @ack: bool variable to check ready bit status or psp ack
 */

static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
        struct snd_sof_dev *sdev = adata->dev;
        int timeout;
        u32 data;

        for (timeout = ACP_PSP_TIMEOUT_COUNTER; timeout > 0; timeout--) {
                msleep(20);
                smn_read(adata->smn_dev, MP0_C2PMSG_114_REG, &data);
                if (data & MBOX_READY_MASK)
                        return 0;
        }

        dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);

        if (ack)
                return -ETIMEDOUT;

        return -EBUSY;
}

/*
 * psp_send_cmd - function to send psp command over mbox
 * @adata: acp device data
 * @cmd: non zero integer value for command type
 */

static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
        struct snd_sof_dev *sdev = adata->dev;
        int ret, timeout;
        u32 data;

        if (!cmd)
                return -EINVAL;

        /* Get a non-zero Doorbell value from PSP */
        for (timeout = ACP_PSP_TIMEOUT_COUNTER; timeout > 0; timeout--) {
                msleep(MBOX_DELAY);
                smn_read(adata->smn_dev, MP0_C2PMSG_73_REG, &data);
                if (data)
                        break;
        }

        if (!timeout) {
                dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
                return -EINVAL;
        }

        /* Check if PSP is ready for new command */
        ret = psp_mbox_ready(adata, 0);
        if (ret)
                return ret;

        smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);

        /* Ring the Doorbell for PSP */
        smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);

        /* Check MBOX ready as PSP ack */
        ret = psp_mbox_ready(adata, 1);

        return ret;
}
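
/*
 * PSP handshake overview (informational): psp_send_cmd() first waits for a
 * non-zero Doorbell value in MP0_C2PMSG_73_REG, checks the mailbox ready bit
 * in MP0_C2PMSG_114_REG, writes the command, rings the Doorbell back and then
 * polls the ready bit again as the PSP acknowledgement.
 * configure_and_run_sha_dma() below relies on this exchange to have the PSP
 * validate the firmware image streamed through the ACP SHA engine.
 */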

int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
                              unsigned int start_addr, unsigned int dest_addr,
                              unsigned int image_length)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int tx_count, fw_qualifier, val;
        int ret;

        if (!image_addr) {
                dev_err(sdev->dev, "SHA DMA image address is NULL\n");
                return -EINVAL;
        }

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
        if (val & ACP_SHA_RUN) {
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
                ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
                                                    val, val & ACP_SHA_RESET,
                                                    ACP_REG_POLL_INTERVAL,
                                                    ACP_REG_POLL_TIMEOUT_US);
                if (ret < 0) {
                        dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
                        return ret;
                }
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

        ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
                                            tx_count, tx_count == image_length,
                                            ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
        if (ret < 0) {
                dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
                return ret;
        }

        ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
        if (ret)
                return ret;

        fw_qualifier = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER);
        if (!(fw_qualifier & DSP_FW_RUN_ENABLE)) {
                dev_err(sdev->dev, "PSP validation failed\n");
                return -EINVAL;
        }

        return 0;
}

int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
        struct snd_sof_dev *sdev = adata->dev;
        unsigned int val;
        int ret = 0;

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
        if (val & ACP_DMA_CH_RUN) {
                ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
                                                    ACP_REG_POLL_INTERVAL,
                                                    ACP_DMA_COMPLETE_TIMEOUT_US);
                if (ret < 0)
                        dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
        }

        return ret;
}

void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
        unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
        int i, j;

        for (i = 0, j = 0; i < bytes; i = i + 4, j++)
                dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
        unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
        int i, j;

        for (i = 0, j = 0; i < bytes; i = i + 4, j++)
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

static int acp_memory_init(struct snd_sof_dev *sdev)
{
        struct acp_dev_data *adata = sdev->pdata->hw_pdata;

        snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, ACP_DSP_SW_INTR_CNTL,
                                ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
        init_dma_descriptor(adata);

        return 0;
}
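
/*
 * Interrupt handling is split in the usual threaded-IRQ fashion: the hard
 * handler acp_irq_handler() only checks ACP_DSP_SW_INTR_STAT and wakes the
 * thread, while acp_irq_thread() clears SHA interrupts raised by the PSP or,
 * for DSP-to-host IPC, takes the ACP_AXI2DAGB_SEM_0 hardware semaphore before
 * invoking the core SOF irq_thread callback and releasing the semaphore.
 */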

static irqreturn_t acp_irq_thread(int irq, void *context)
{
        struct snd_sof_dev *sdev = context;
        unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_EXTERNAL_INTR_STAT);
        if (val & ACP_SHA_STAT) {
                /* Clear SHA interrupt raised by PSP */
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_EXTERNAL_INTR_STAT, val);
                return IRQ_HANDLED;
        }

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP_SW_INTR_STAT);
        if (val & ACP_DSP_TO_HOST_IRQ) {
                while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_AXI2DAGB_SEM_0)) {
                        /* Wait until acquired HW Semaphore lock or timeout */
                        count--;
                        if (!count) {
                                dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
                                return IRQ_NONE;
                        }
                }

                sof_ops(sdev)->irq_thread(irq, sdev);
                val |= ACP_DSP_TO_HOST_IRQ;
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP_SW_INTR_STAT, val);

                /* Unlock or Release HW Semaphore */
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_AXI2DAGB_SEM_0, 0x0);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
        struct snd_sof_dev *sdev = dev_id;
        unsigned int val;

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP_SW_INTR_STAT);
        if (val)
                return IRQ_WAKE_THREAD;

        return IRQ_NONE;
}

static int acp_power_on(struct snd_sof_dev *sdev)
{
        unsigned int val;
        int ret;

        val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_PGFSM_STATUS);

        if (val == ACP_POWERED_ON)
                return 0;

        if (val & ACP_PGFSM_STATUS_MASK)
                snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_PGFSM_CONTROL,
                                  ACP_PGFSM_CNTL_POWER_ON_MASK);

        ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_PGFSM_STATUS, val, !val,
                                            ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
        if (ret < 0)
                dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

        return ret;
}

static int acp_reset(struct snd_sof_dev *sdev)
{
        unsigned int val;
        int ret;

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

        ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
                                            val & ACP_SOFT_RESET_DONE_MASK,
                                            ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
        if (ret < 0) {
                dev_err(sdev->dev, "timeout asserting reset\n");
                return ret;
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

        ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
                                            ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
        if (ret < 0)
                dev_err(sdev->dev, "timeout in releasing reset\n");

        return ret;
}

static int acp_init(struct snd_sof_dev *sdev)
{
        int ret;

        /* power on */
        ret = acp_power_on(sdev);
        if (ret) {
                dev_err(sdev->dev, "ACP power on failed\n");
                return ret;
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
        /* Reset */
        return acp_reset(sdev);
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
        int ret;

        ret = acp_reset(sdev);
        if (ret) {
                dev_err(sdev->dev, "ACP Reset failed\n");
                return ret;
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);

        return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
        int ret;

        ret = acp_init(sdev);
        if (ret) {
                dev_err(sdev->dev, "ACP Init failed\n");
                return ret;
        }

        snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CLKMUX_SEL, 0x03);

        ret = acp_memory_init(sdev);

        return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);
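
/*
 * Illustrative sketch, not part of this file: a platform-specific ACP driver
 * (renoir, for example) is expected to wire the exported helpers above and
 * below into its snd_sof_dsp_ops, roughly along these lines:
 *
 *	.probe   = amd_sof_acp_probe,
 *	.remove  = amd_sof_acp_remove,
 *	.suspend = amd_sof_acp_suspend,
 *	.resume  = amd_sof_acp_resume,
 */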

int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
        struct pci_dev *pci = to_pci_dev(sdev->dev);
        struct acp_dev_data *adata;
        const struct sof_amd_acp_desc *chip;
        unsigned int addr;
        int ret;

        adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
                             GFP_KERNEL);
        if (!adata)
                return -ENOMEM;

        adata->dev = sdev;
        addr = pci_resource_start(pci, ACP_DSP_BAR);
        sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
        if (!sdev->bar[ACP_DSP_BAR]) {
                dev_err(sdev->dev, "ioremap error\n");
                return -ENXIO;
        }

        pci_set_master(pci);

        sdev->pdata->hw_pdata = adata;

        chip = get_chip_info(sdev->pdata);
        if (!chip) {
                dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
                return -EIO;
        }

        adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
        if (!adata->smn_dev) {
                dev_err(sdev->dev, "Failed to get host bridge device\n");
                return -ENODEV;
        }

        sdev->ipc_irq = pci->irq;
        ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
                                   IRQF_SHARED, "AudioDSP", sdev);
        if (ret < 0) {
                dev_err(sdev->dev, "failed to register IRQ %d\n",
                        sdev->ipc_irq);
                pci_dev_put(adata->smn_dev);
                return ret;
        }

        ret = acp_init(sdev);
        if (ret < 0) {
                free_irq(sdev->ipc_irq, sdev);
                pci_dev_put(adata->smn_dev);
                return ret;
        }

        acp_memory_init(sdev);

        acp_dsp_stream_init(sdev);

        return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
        struct acp_dev_data *adata = sdev->pdata->hw_pdata;

        if (adata->smn_dev)
                pci_dev_put(adata->smn_dev);

        if (sdev->ipc_irq)
                free_irq(sdev->ipc_irq, sdev);

        return acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);

MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_LICENSE("Dual BSD/GPL");