1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 // 3 // This file is provided under a dual BSD/GPLv2 license. When using or 4 // redistributing this file, you may do so under either license. 5 // 6 // Copyright(c) 2018 Intel Corporation 7 // 8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com> 9 // Ranjani Sridharan <ranjani.sridharan@linux.intel.com> 10 // Rander Wang <rander.wang@intel.com> 11 // Keyon Jie <yang.jie@linux.intel.com> 12 // 13 14 /* 15 * Hardware interface for HDA DSP code loader 16 */ 17 18 #include <linux/firmware.h> 19 #include <sound/hdaudio_ext.h> 20 #include <sound/hda_register.h> 21 #include <sound/sof.h> 22 #include <sound/sof/ipc4/header.h> 23 #include "ext_manifest.h" 24 #include "../ipc4-priv.h" 25 #include "../ops.h" 26 #include "../sof-priv.h" 27 #include "hda.h" 28 29 static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev) 30 { 31 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 32 const struct sof_intel_dsp_desc *chip = hda->desc; 33 int i; 34 35 /* DSP is powered up, set all SSPs to clock consumer/codec provider mode */ 36 for (i = 0; i < chip->ssp_count; i++) { 37 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 38 chip->ssp_base_offset 39 + i * SSP_DEV_MEM_SIZE 40 + SSP_SSC1_OFFSET, 41 SSP_SET_CBP_CFP, 42 SSP_SET_CBP_CFP); 43 } 44 } 45 46 struct hdac_ext_stream *hda_cl_prepare(struct device *dev, unsigned int format, 47 unsigned int size, struct snd_dma_buffer *dmab, 48 int direction, bool is_iccmax) 49 { 50 struct snd_sof_dev *sdev = dev_get_drvdata(dev); 51 struct hdac_ext_stream *hext_stream; 52 struct hdac_stream *hstream; 53 int ret; 54 55 hext_stream = hda_dsp_stream_get(sdev, direction, 0); 56 57 if (!hext_stream) { 58 dev_err(sdev->dev, "error: no stream available\n"); 59 return ERR_PTR(-ENODEV); 60 } 61 hstream = &hext_stream->hstream; 62 hstream->substream = NULL; 63 64 /* allocate DMA buffer */ 65 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab); 66 if (ret < 0) { 67 dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret); 68 goto out_put; 69 } 70 71 hstream->period_bytes = 0;/* initialize period_bytes */ 72 hstream->format_val = format; 73 hstream->bufsize = size; 74 75 if (is_iccmax) { 76 ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL); 77 if (ret < 0) { 78 dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret); 79 goto out_free; 80 } 81 } else { 82 ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL); 83 if (ret < 0) { 84 dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret); 85 goto out_free; 86 } 87 hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size); 88 } 89 90 return hext_stream; 91 92 out_free: 93 snd_dma_free_pages(dmab); 94 out_put: 95 hda_dsp_stream_put(sdev, direction, hstream->stream_tag); 96 return ERR_PTR(ret); 97 } 98 EXPORT_SYMBOL_NS(hda_cl_prepare, SND_SOC_SOF_INTEL_HDA_COMMON); 99 100 /* 101 * first boot sequence has some extra steps. 102 * power on all host managed cores and only unstall/run the boot core to boot the 103 * DSP then turn off all non boot cores (if any) is powered on. 104 */ 105 int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot) 106 { 107 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 108 const struct sof_intel_dsp_desc *chip = hda->desc; 109 unsigned int status, target_status; 110 u32 flags, ipc_hdr, j; 111 unsigned long mask; 112 char *dump_msg; 113 int ret; 114 115 /* step 1: power up corex */ 116 ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask); 117 if (ret < 0) { 118 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 119 dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n"); 120 goto err; 121 } 122 123 hda_ssp_set_cbp_cfp(sdev); 124 125 /* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */ 126 ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL; 127 if (!imr_boot) 128 ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9); 129 130 snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr); 131 132 /* step 3: unset core 0 reset state & unstall/run core 0 */ 133 ret = hda_dsp_core_run(sdev, chip->init_core_mask); 134 if (ret < 0) { 135 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 136 dev_err(sdev->dev, 137 "error: dsp core start failed %d\n", ret); 138 ret = -EIO; 139 goto err; 140 } 141 142 /* step 4: wait for IPC DONE bit from ROM */ 143 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 144 chip->ipc_ack, status, 145 ((status & chip->ipc_ack_mask) 146 == chip->ipc_ack_mask), 147 HDA_DSP_REG_POLL_INTERVAL_US, 148 HDA_DSP_INIT_TIMEOUT_US); 149 150 if (ret < 0) { 151 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 152 dev_err(sdev->dev, 153 "error: %s: timeout for HIPCIE done\n", 154 __func__); 155 goto err; 156 } 157 158 /* set DONE bit to clear the reply IPC message */ 159 snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, 160 chip->ipc_ack, 161 chip->ipc_ack_mask, 162 chip->ipc_ack_mask); 163 164 /* step 5: power down cores that are no longer needed */ 165 ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask & 166 ~(chip->init_core_mask)); 167 if (ret < 0) { 168 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 169 dev_err(sdev->dev, 170 "error: dsp core x power down failed\n"); 171 goto err; 172 } 173 174 /* step 6: enable IPC interrupts */ 175 hda_dsp_ipc_int_enable(sdev); 176 177 /* 178 * step 7: 179 * - Cold/Full boot: wait for ROM init to proceed to download the firmware 180 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR) 181 */ 182 if (imr_boot) 183 target_status = FSR_STATE_FW_ENTERED; 184 else 185 target_status = FSR_STATE_INIT_DONE; 186 187 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 188 chip->rom_status_reg, status, 189 (FSR_TO_STATE_CODE(status) == target_status), 190 HDA_DSP_REG_POLL_INTERVAL_US, 191 chip->rom_init_timeout * 192 USEC_PER_MSEC); 193 if (!ret) { 194 /* set enabled cores mask and increment ref count for cores in init_core_mask */ 195 sdev->enabled_cores_mask |= chip->init_core_mask; 196 mask = sdev->enabled_cores_mask; 197 for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES) 198 sdev->dsp_core_ref_count[j]++; 199 return 0; 200 } 201 202 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 203 dev_err(sdev->dev, 204 "%s: timeout with rom_status_reg (%#x) read\n", 205 __func__, chip->rom_status_reg); 206 207 err: 208 flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL; 209 210 /* after max boot attempts make sure that the dump is printed */ 211 if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) 212 flags &= ~SOF_DBG_DUMP_OPTIONAL; 213 214 dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d", 215 hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS); 216 snd_sof_dsp_dbg_dump(sdev, dump_msg, flags); 217 hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask); 218 219 kfree(dump_msg); 220 return ret; 221 } 222 EXPORT_SYMBOL_NS(cl_dsp_init, SND_SOC_SOF_INTEL_HDA_COMMON); 223 224 int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd) 225 { 226 struct snd_sof_dev *sdev = dev_get_drvdata(dev); 227 struct hdac_stream *hstream = &hext_stream->hstream; 228 int sd_offset = SOF_STREAM_SD_OFFSET(hstream); 229 struct sof_intel_hda_stream *hda_stream; 230 231 /* code loader is special case that reuses stream ops */ 232 switch (cmd) { 233 case SNDRV_PCM_TRIGGER_START: 234 hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, 235 hext_stream); 236 reinit_completion(&hda_stream->ioc); 237 238 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL, 239 1 << hstream->index, 240 1 << hstream->index); 241 242 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, 243 sd_offset, 244 SOF_HDA_SD_CTL_DMA_START | 245 SOF_HDA_CL_DMA_SD_INT_MASK, 246 SOF_HDA_SD_CTL_DMA_START | 247 SOF_HDA_CL_DMA_SD_INT_MASK); 248 249 hstream->running = true; 250 return 0; 251 default: 252 return hda_dsp_stream_trigger(sdev, hext_stream, cmd); 253 } 254 } 255 EXPORT_SYMBOL_NS(hda_cl_trigger, SND_SOC_SOF_INTEL_HDA_COMMON); 256 257 int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab, 258 struct hdac_ext_stream *hext_stream) 259 { 260 struct snd_sof_dev *sdev = dev_get_drvdata(dev); 261 struct hdac_stream *hstream = &hext_stream->hstream; 262 int sd_offset = SOF_STREAM_SD_OFFSET(hstream); 263 int ret = 0; 264 265 if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) 266 ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0); 267 else 268 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, 269 SOF_HDA_SD_CTL_DMA_START, 0); 270 271 hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag); 272 hstream->running = 0; 273 hstream->substream = NULL; 274 275 /* reset BDL address */ 276 snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, 277 sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0); 278 snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, 279 sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0); 280 281 snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0); 282 snd_dma_free_pages(dmab); 283 dmab->area = NULL; 284 hstream->bufsize = 0; 285 hstream->format_val = 0; 286 287 return ret; 288 } 289 EXPORT_SYMBOL_NS(hda_cl_cleanup, SND_SOC_SOF_INTEL_HDA_COMMON); 290 291 #define HDA_CL_DMA_IOC_TIMEOUT_MS 500 292 293 int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream) 294 { 295 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 296 const struct sof_intel_dsp_desc *chip = hda->desc; 297 struct sof_intel_hda_stream *hda_stream; 298 unsigned long time_left; 299 unsigned int reg; 300 int ret, status; 301 302 hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, 303 hext_stream); 304 305 dev_dbg(sdev->dev, "Code loader DMA starting\n"); 306 307 ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START); 308 if (ret < 0) { 309 dev_err(sdev->dev, "error: DMA trigger start failed\n"); 310 return ret; 311 } 312 313 /* Wait for completion of transfer */ 314 time_left = wait_for_completion_timeout(&hda_stream->ioc, 315 msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS)); 316 317 if (!time_left) { 318 dev_err(sdev->dev, "Code loader DMA did not complete\n"); 319 return -ETIMEDOUT; 320 } 321 dev_dbg(sdev->dev, "Code loader DMA done, waiting for FW_ENTERED status\n"); 322 323 status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 324 chip->rom_status_reg, reg, 325 (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED), 326 HDA_DSP_REG_POLL_INTERVAL_US, 327 HDA_DSP_BASEFW_TIMEOUT_US); 328 329 /* 330 * even in case of errors we still need to stop the DMAs, 331 * but we return the initial error should the DMA stop also fail 332 */ 333 334 if (status < 0) { 335 dev_err(sdev->dev, 336 "%s: timeout with rom_status_reg (%#x) read\n", 337 __func__, chip->rom_status_reg); 338 } else { 339 dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n"); 340 } 341 342 ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP); 343 if (ret < 0) { 344 dev_err(sdev->dev, "error: DMA trigger stop failed\n"); 345 if (!status) 346 status = ret; 347 } else { 348 dev_dbg(sdev->dev, "Code loader DMA stopped\n"); 349 } 350 351 return status; 352 } 353 354 int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev) 355 { 356 struct hdac_ext_stream *iccmax_stream; 357 struct snd_dma_buffer dmab_bdl; 358 int ret, ret1; 359 u8 original_gb; 360 361 /* save the original LTRP guardband value */ 362 original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) & 363 HDA_VS_INTEL_LTRP_GB_MASK; 364 365 /* 366 * Prepare capture stream for ICCMAX. We do not need to store 367 * the data, so use a buffer of PAGE_SIZE for receiving. 368 */ 369 iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE, 370 &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE, true); 371 if (IS_ERR(iccmax_stream)) { 372 dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n"); 373 return PTR_ERR(iccmax_stream); 374 } 375 376 ret = hda_dsp_cl_boot_firmware(sdev); 377 378 /* 379 * Perform iccmax stream cleanup. This should be done even if firmware loading fails. 380 * If the cleanup also fails, we return the initial error 381 */ 382 ret1 = hda_cl_cleanup(sdev->dev, &dmab_bdl, iccmax_stream); 383 if (ret1 < 0) { 384 dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n"); 385 386 /* set return value to indicate cleanup failure */ 387 if (!ret) 388 ret = ret1; 389 } 390 391 /* restore the original guardband value after FW boot */ 392 snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP, 393 HDA_VS_INTEL_LTRP_GB_MASK, original_gb); 394 395 return ret; 396 } 397 EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, SND_SOC_SOF_INTEL_CNL); 398 399 static int hda_dsp_boot_imr(struct snd_sof_dev *sdev) 400 { 401 const struct sof_intel_dsp_desc *chip_info; 402 int ret; 403 404 chip_info = get_chip_info(sdev->pdata); 405 if (chip_info->cl_init) 406 ret = chip_info->cl_init(sdev, 0, true); 407 else 408 ret = -EINVAL; 409 410 if (!ret) 411 hda_sdw_process_wakeen(sdev); 412 413 return ret; 414 } 415 416 int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev) 417 { 418 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 419 struct snd_sof_pdata *plat_data = sdev->pdata; 420 const struct sof_dev_desc *desc = plat_data->desc; 421 const struct sof_intel_dsp_desc *chip_info; 422 struct hdac_ext_stream *hext_stream; 423 struct firmware stripped_firmware; 424 struct snd_dma_buffer dmab; 425 int ret, ret1, i; 426 427 if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) { 428 dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n"); 429 hda->boot_iteration = 0; 430 ret = hda_dsp_boot_imr(sdev); 431 if (!ret) { 432 hda->booted_from_imr = true; 433 return 0; 434 } 435 436 dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n"); 437 } 438 439 hda->booted_from_imr = false; 440 441 chip_info = desc->chip_info; 442 443 if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) { 444 dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n"); 445 return -EINVAL; 446 } 447 448 stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset; 449 stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset; 450 451 /* init for booting wait */ 452 init_waitqueue_head(&sdev->boot_wait); 453 454 /* prepare DMA for code loader stream */ 455 hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, 456 stripped_firmware.size, 457 &dmab, SNDRV_PCM_STREAM_PLAYBACK, false); 458 if (IS_ERR(hext_stream)) { 459 dev_err(sdev->dev, "error: dma prepare for fw loading failed\n"); 460 return PTR_ERR(hext_stream); 461 } 462 463 memcpy(dmab.area, stripped_firmware.data, 464 stripped_firmware.size); 465 466 /* try ROM init a few times before giving up */ 467 for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) { 468 dev_dbg(sdev->dev, 469 "Attempting iteration %d of Core En/ROM load...\n", i); 470 471 hda->boot_iteration = i + 1; 472 if (chip_info->cl_init) 473 ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false); 474 else 475 ret = -EINVAL; 476 477 /* don't retry anymore if successful */ 478 if (!ret) 479 break; 480 } 481 482 if (i == HDA_FW_BOOT_ATTEMPTS) { 483 dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n", 484 i, ret); 485 goto cleanup; 486 } 487 488 /* 489 * When a SoundWire link is in clock stop state, a Slave 490 * device may trigger in-band wakes for events such as jack 491 * insertion or acoustic event detection. This event will lead 492 * to a WAKEEN interrupt, handled by the PCI device and routed 493 * to PME if the PCI device is in D3. The resume function in 494 * audio PCI driver will be invoked by ACPI for PME event and 495 * initialize the device and process WAKEEN interrupt. 496 * 497 * The WAKEEN interrupt should be processed ASAP to prevent an 498 * interrupt flood, otherwise other interrupts, such IPC, 499 * cannot work normally. The WAKEEN is handled after the ROM 500 * is initialized successfully, which ensures power rails are 501 * enabled before accessing the SoundWire SHIM registers 502 */ 503 if (!sdev->first_boot) 504 hda_sdw_process_wakeen(sdev); 505 506 /* 507 * Set the boot_iteration to the last attempt, indicating that the 508 * DSP ROM has been initialized and from this point there will be no 509 * retry done to boot. 510 * 511 * Continue with code loading and firmware boot 512 */ 513 hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS; 514 ret = hda_cl_copy_fw(sdev, hext_stream); 515 if (!ret) { 516 dev_dbg(sdev->dev, "Firmware download successful, booting...\n"); 517 hda->skip_imr_boot = false; 518 } else { 519 snd_sof_dsp_dbg_dump(sdev, "Firmware download failed", 520 SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX); 521 hda->skip_imr_boot = true; 522 } 523 524 cleanup: 525 /* 526 * Perform codeloader stream cleanup. 527 * This should be done even if firmware loading fails. 528 * If the cleanup also fails, we return the initial error 529 */ 530 ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream); 531 if (ret1 < 0) { 532 dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n"); 533 534 /* set return value to indicate cleanup failure */ 535 if (!ret) 536 ret = ret1; 537 } 538 539 /* 540 * return primary core id if both fw copy 541 * and stream clean up are successful 542 */ 543 if (!ret) 544 return chip_info->init_core_mask; 545 546 /* disable DSP */ 547 hda_dsp_ctrl_ppcap_enable(sdev, false); 548 549 return ret; 550 } 551 EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, SND_SOC_SOF_INTEL_HDA_COMMON); 552 553 int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev, 554 struct sof_ipc4_fw_library *fw_lib, bool reload) 555 { 556 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 557 struct sof_ipc4_fw_data *ipc4_data = sdev->private; 558 struct hdac_ext_stream *hext_stream; 559 struct firmware stripped_firmware; 560 struct sof_ipc4_msg msg = {}; 561 struct snd_dma_buffer dmab; 562 int ret, ret1; 563 564 /* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */ 565 if (reload && hda->booted_from_imr && ipc4_data->fw_context_save) 566 return 0; 567 568 /* the fw_lib has been verified during loading, we can trust the validity here */ 569 stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset; 570 stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset; 571 572 /* prepare DMA for code loader stream */ 573 hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, 574 stripped_firmware.size, 575 &dmab, SNDRV_PCM_STREAM_PLAYBACK, false); 576 if (IS_ERR(hext_stream)) { 577 dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__); 578 return PTR_ERR(hext_stream); 579 } 580 581 memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size); 582 583 /* 584 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE 585 * Message includes the dma_id to be prepared for the library loading. 586 * If the firmware does not have support for the message, we will 587 * receive -EOPNOTSUPP. In this case we will use single step library 588 * loading and proceed to send the LOAD_LIBRARY message. 589 */ 590 msg.primary = hext_stream->hstream.stream_tag - 1; 591 msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE); 592 msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST); 593 msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG); 594 ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0); 595 if (!ret) { 596 int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream); 597 unsigned int status; 598 599 /* 600 * Make sure that the FIFOS value is not 0 in SDxFIFOS register 601 * which indicates that the firmware set the GEN bit and we can 602 * continue to start the DMA 603 */ 604 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR, 605 sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE, 606 status, 607 status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK, 608 HDA_DSP_REG_POLL_INTERVAL_US, 609 HDA_DSP_BASEFW_TIMEOUT_US); 610 611 if (ret < 0) 612 dev_warn(sdev->dev, 613 "%s: timeout waiting for FIFOS\n", __func__); 614 } else if (ret != -EOPNOTSUPP) { 615 goto cleanup; 616 } 617 618 ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START); 619 if (ret < 0) { 620 dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__); 621 goto cleanup; 622 } 623 624 /* 625 * 2nd stage: LOAD_LIBRARY 626 * Message includes the dma_id and the lib_id, the dma_id must be 627 * identical to the one sent via LOAD_LIBRARY_PREPARE 628 */ 629 msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK; 630 msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY); 631 msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id); 632 ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0); 633 634 /* Stop the DMA channel */ 635 ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP); 636 if (ret1 < 0) { 637 dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__); 638 if (!ret) 639 ret = ret1; 640 } 641 642 cleanup: 643 /* clean up even in case of error and return the first error */ 644 ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream); 645 if (ret1 < 0) { 646 dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__); 647 648 /* set return value to indicate cleanup failure */ 649 if (!ret) 650 ret = ret1; 651 } 652 653 return ret; 654 } 655 EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, SND_SOC_SOF_INTEL_HDA_COMMON); 656 657 int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev, 658 const struct sof_ext_man_elem_header *hdr) 659 { 660 const struct sof_ext_man_cavs_config_data *config_data = 661 container_of(hdr, struct sof_ext_man_cavs_config_data, hdr); 662 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 663 int i, elem_num; 664 665 /* calculate total number of config data elements */ 666 elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header)) 667 / sizeof(struct sof_config_elem); 668 if (elem_num <= 0) { 669 dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num); 670 return -EINVAL; 671 } 672 673 for (i = 0; i < elem_num; i++) 674 switch (config_data->elems[i].token) { 675 case SOF_EXT_MAN_CAVS_CONFIG_EMPTY: 676 /* skip empty token */ 677 break; 678 case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO: 679 hda->clk_config_lpro = config_data->elems[i].value; 680 dev_dbg(sdev->dev, "FW clock config: %s\n", 681 hda->clk_config_lpro ? "LPRO" : "HPRO"); 682 break; 683 case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE: 684 case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE: 685 /* These elements are defined but not being used yet. No warn is required */ 686 break; 687 default: 688 dev_info(sdev->dev, "unsupported token type: %d\n", 689 config_data->elems[i].token); 690 } 691 692 return 0; 693 } 694 EXPORT_SYMBOL_NS(hda_dsp_ext_man_get_cavs_config_data, SND_SOC_SOF_INTEL_HDA_COMMON); 695