// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//          Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//          Rander Wang <rander.wang@intel.com>
//          Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"

static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}
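
/*
 * hda_cl_prepare - prepare a host DMA stream for code loader use
 *
 * Gets a free HDA stream for the given direction, allocates an SG DMA buffer
 * of @size bytes and programs the stream with @format. For regular code
 * loader streams SPIB is enabled and set to the buffer size; ICCMAX streams
 * skip the SPIB setup. Returns the prepared stream or an ERR_PTR() on error.
 */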
struct hdac_ext_stream *hda_cl_prepare(struct device *dev, unsigned int format,
				       unsigned int size, struct snd_dma_buffer *dmab,
				       int direction, bool is_iccmax)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0; /* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (is_iccmax) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS(hda_cl_prepare, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * The first boot sequence has some extra steps:
 * power on all host-managed cores, unstall/run only the boot core to boot the
 * DSP, then power down any non-boot cores that were powered on.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					    ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->rom_status_reg, status,
					    (FSR_TO_STATE_CODE(status) == target_status),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    chip->rom_init_timeout *
					    USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}

int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	struct sof_intel_hda_stream *hda_stream;

	/* the code loader is a special case that reuses the stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
					  hext_stream);
		reinit_completion(&hda_stream->ioc);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}
EXPORT_SYMBOL_NS(hda_cl_trigger, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}
EXPORT_SYMBOL_NS(hda_cl_cleanup, SND_SOC_SOF_INTEL_HDA_COMMON);

#define HDA_CL_DMA_IOC_TIMEOUT_MS 500
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct sof_intel_hda_stream *hda_stream;
	unsigned long time_left;
	unsigned int reg;
	int ret, status;

	hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
				  hext_stream);

	dev_dbg(sdev->dev, "Code loader DMA starting\n");

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	/* Wait for completion of transfer */
	time_left = wait_for_completion_timeout(&hda_stream->ioc,
						msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));

	if (!time_left) {
		dev_err(sdev->dev, "Code loader DMA did not complete\n");
		return -ETIMEDOUT;
	}
	dev_dbg(sdev->dev, "Code loader DMA done, waiting for FW_ENTERED status\n");

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					       chip->rom_status_reg, reg,
					       (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					       HDA_DSP_REG_POLL_INTERVAL_US,
					       HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	} else {
		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	} else {
		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
	}

	return status;
}

int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct hdac_ext_stream *iccmax_stream;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		      HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
				       &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE, true);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}

static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip_info;
	int ret;

	chip_info = get_chip_info(sdev->pdata);
	if (chip_info->cl_init)
		ret = chip_info->cl_init(sdev, 0, true);
	else
		ret = -EINVAL;

	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}
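
/*
 * hda_dsp_cl_boot_firmware - download and boot the SOF base firmware
 *
 * Tries an IMR boot first when supported, otherwise performs a cold boot:
 * copies the stripped firmware image into a code loader DMA buffer, runs the
 * ROM init sequence (retrying up to HDA_FW_BOOT_ATTEMPTS times) and transfers
 * the image to the DSP. Returns the initial core mask on success or a
 * negative error code.
 */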
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event
	 * and will initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set the boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and from this point there will be no
	 * retry done to boot.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	hda_dsp_ctrl_ppcap_enable(sdev, false);

	return ret;
}
int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	struct snd_dma_buffer dmab;
	int ret, ret1;

	/* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */
	if (reload && hda->booted_from_imr && ipc4_data->fw_context_save)
		return 0;

	/* the fw_lib has been verified during loading, we can trust the validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);

	/*
	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
	 * Message includes the dma_id to be prepared for the library loading.
	 * If the firmware does not have support for the message, we will
	 * receive -EOPNOTSUPP. In this case we will use single step library
	 * loading and proceed to send the LOAD_LIBRARY message.
	 */
	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
	if (!ret) {
		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
		unsigned int status;

		/*
		 * Make sure that the FIFOS value is not 0 in SDxFIFOS register
		 * which indicates that the firmware set the GEN bit and we can
		 * continue to start the DMA
		 */
		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
						    sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
						    status,
						    status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
						    HDA_DSP_REG_POLL_INTERVAL_US,
						    HDA_DSP_BASEFW_TIMEOUT_US);

		if (ret < 0)
			dev_warn(sdev->dev,
				 "%s: timeout waiting for FIFOS\n", __func__);
	} else if (ret != -EOPNOTSUPP) {
		goto cleanup;
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	/*
	 * 2nd stage: LOAD_LIBRARY
	 * Message includes the dma_id and the lib_id, the dma_id must be
	 * identical to the one sent via LOAD_LIBRARY_PREPARE
	 */
	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	/* Stop the DMA channel */
	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}

/* pre fw run operations */
int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	/* disable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, false);
}

/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	int ret;

	if (sdev->first_boot) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

		ret = hda_sdw_startup(sdev);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error: could not startup SoundWire links\n");
			return ret;
		}

		/* Check if IMR boot is usable */
		if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
		    (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
		     sdev->pdata->ipc_type == SOF_IPC_TYPE_4)) {
			hdev->imrboot_supported = true;
			debugfs_create_bool("skip_imr_boot",
					    0644, sdev->debugfs_root,
					    &hdev->skip_imr_boot);
		}
	}

	hda_sdw_int_enable(sdev, true);

	/* re-enable clock gating and power gating */
	return hda_dsp_ctrl_clock_power_gating(sdev, true);
}

int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* These elements are defined but not being used yet. No warn is required */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}