// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */

#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/hda-mlink.h>
#include <trace/events/sof_intel.h>
#include <sound/sof/xtensa.h>
#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
#include "mtl.h"
#include "hda-ipc.h"

#define EXCEPT_MAX_HDR_SIZE	0x400
#define HDA_EXT_ROM_STATUS_SIZE	8

struct hda_dsp_msg_code {
	u32 code;
	const char *text;
};

static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif

static void hda_get_interfaces(struct snd_sof_dev *sdev, u32 *interface_mask)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	switch (chip->hw_ip_version) {
	case SOF_INTEL_TANGIER:
	case SOF_INTEL_BAYTRAIL:
	case SOF_INTEL_BROADWELL:
		interface_mask[SOF_DAI_DSP_ACCESS] = BIT(SOF_DAI_INTEL_SSP);
		break;
	case SOF_INTEL_CAVS_1_5:
	case SOF_INTEL_CAVS_1_5_PLUS:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | BIT(SOF_DAI_INTEL_HDA);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_CAVS_1_8:
	case SOF_INTEL_CAVS_2_0:
	case SOF_INTEL_CAVS_2_5:
	case SOF_INTEL_ACE_1_0:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_ACE_2_0:
	case SOF_INTEL_ACE_3_0:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		/* all interfaces accessible without DSP */
		interface_mask[SOF_DAI_HOST_ACCESS] =
			interface_mask[SOF_DAI_DSP_ACCESS];
		break;
	default:
		break;
	}
}

u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
{
	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };

	hda_get_interfaces(sdev, interface_mask);

	return interface_mask[sdev->dspless_mode_selected];
}
EXPORT_SYMBOL_NS(hda_get_interface_mask, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type)
{
	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
	const struct sof_intel_dsp_desc *chip;

	if (sdev->dspless_mode_selected)
		return false;

	hda_get_interfaces(sdev, interface_mask);

	if (!(interface_mask[SOF_DAI_DSP_ACCESS] & BIT(dai_type)))
		return false;

	if (dai_type == SOF_DAI_INTEL_HDA)
		return true;

	switch (dai_type) {
	case SOF_DAI_INTEL_SSP:
	case SOF_DAI_INTEL_DMIC:
	case SOF_DAI_INTEL_ALH:
		chip = get_chip_info(sdev->pdata);
		if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
			return false;
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_NS(hda_is_chain_dma_supported, SND_SOC_SOF_INTEL_HDA_COMMON);
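
/*
 * Usage sketch (illustrative only, not part of this driver): callers test
 * the returned mask against a DAI type bit to decide whether an interface
 * is usable in the currently selected (DSP or DSP-less) mode, e.g.
 *
 *	if (hda_get_interface_mask(sdev) & BIT(SOF_DAI_INTEL_ALH))
 *		...handle SoundWire/ALH...
 *
 * hda_sdw_int_enable() further down in this file follows this pattern.
 */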
/*
 * DSP Core control.
 */

static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    ((adspcs & reset) == reset),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return hda_dsp_core_reset_enter(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_core_stall_reset, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);

#define MASK_IS_EQUAL(v, m, field) ({	\
	u32 _m = field(m);		\
	((v) & _m) == _m;		\
})

	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

#undef MASK_IS_EQUAL

	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
		is_enable, core_mask);

	return is_enable;
}
EXPORT_SYMBOL_NS(hda_dsp_core_is_enabled, SND_SOC_SOF_INTEL_HDA_COMMON);
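
/*
 * Note on the ADSPCS handshake used above (a summary of this file's usage;
 * the cAVS/ACE datasheets remain the authoritative reference): ADSPCS packs
 * four per-core bitfields. CRST (reset) and CSTALL (stall) are host request
 * bits, SPA requests power and CPA is the hardware's power-active
 * acknowledgment, so every request is followed by a poll until the
 * corresponding status field reflects it. A core counts as enabled only
 * when SPA and CPA are set while CRST and CSTALL are clear, which is
 * exactly what hda_dsp_core_is_enabled() checks.
 */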
int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 0);

	/* is core now running ? */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_run, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * Power Management.
 */

int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int cpa;
	u32 adspcs;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;
	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* update bits */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
				HDA_DSP_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    (adspcs & cpa) == cpa,
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* did core power up ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: power up core failed core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_power_up, SND_SOC_SOF_INTEL_HDA_COMMON);
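
/*
 * Note (summary of the flow above, not extra hardware behaviour): powering
 * up with hda_dsp_core_power_up() only gates power to the core(s); a
 * powered core still sits stalled in reset. hda_dsp_core_run() must follow
 * to clear the reset and stall bits, which is the sequence implemented by
 * hda_dsp_enable_core() below.
 */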
static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}

int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid or cores are already enabled */
	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
		return 0;

	/* power up */
	ret = hda_dsp_core_power_up(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	return hda_dsp_core_run(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_enable_core, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
				  unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* place core in reset prior to power down */
	ret = hda_dsp_core_stall_reset(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	/* power down core */
	ret = hda_dsp_core_power_down(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
			core_mask, ret);
		return ret;
	}

	/* make sure we are in OFF state */
	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
			core_mask, ret);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_reset_power_down, SND_SOC_SOF_INTEL_HDA_COMMON);
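
/*
 * Illustrative pairing (sketch, not part of the driver): a host-managed
 * core is typically brought up and torn down symmetrically, e.g.
 *
 *	ret = hda_dsp_enable_core(sdev, BIT(core));		// power up + run
 *	...
 *	ret = hda_dsp_core_reset_power_down(sdev, BIT(core));	// stall, reset, power off
 */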
void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* enable IPC DONE and BUSY interrupts */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
				HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
				HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);

	/* enable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* disable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, 0);

	/* disable IPC BUSY and DONE interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
				HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_disable, SND_SOC_SOF_INTEL_HDA_COMMON);

static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
{
	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(pdata);
	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
		SOF_HDA_VS_D0I3C_CIP) {
		if (!retry--)
			return -ETIMEDOUT;
		usleep_range(10, 15);
	}

	return 0;
}

static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
{
	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);

	if (pm_ops && pm_ops->set_pm_gate)
		return pm_ops->set_pm_gate(sdev, flags);

	return 0;
}
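
/*
 * D0I3C update sequence implemented below, summarized: wait for
 * D0I3C::CIP (command-in-progress) to clear, write the D0I3C::I3 bit,
 * allow a short settling delay, wait for CIP to clear again, then read
 * the register back to confirm that the I3 bit actually changed.
 */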
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}

/*
 * d0i3 streaming is enabled if all the active streams can
 * work in d0i3 state and playback is enabled
 */
static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
{
	struct snd_pcm_substream *substream;
	struct snd_sof_pcm *spcm;
	bool playback_active = false;
	int dir;

	list_for_each_entry(spcm, &sdev->pcm_list, list) {
		for_each_pcm_streams(dir) {
			substream = spcm->stream[dir].substream;
			if (!substream || !substream->runtime)
				continue;

			if (!spcm->stream[dir].d0i3_compatible)
				return false;

			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
				playback_active = true;
		}
	}

	return playback_active;
}
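
/*
 * Worked example derived from the logic below: entering D0I3 on the way
 * to S0ix selects value = SOF_HDA_VS_D0I3C_I3 and, since trace DMA must
 * not run across S0ix, flags = HDA_PM_NO_DMA_TRACE (OR-ed with
 * HDA_PM_PG_STREAMING when only D0I3-compatible streams with active
 * playback are running). A D0I0 target instead sends flags = HDA_PM_PPG
 * to keep power gating disabled.
 */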
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA needs to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purposes.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}

/* helper to log DSP state */
static void hda_dsp_state_log(struct snd_sof_dev *sdev)
{
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		switch (sdev->dsp_power_state.substate) {
		case SOF_HDA_DSP_PM_D0I0:
			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
			break;
		case SOF_HDA_DSP_PM_D0I3:
			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
			break;
		default:
			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
				sdev->dsp_power_state.substate);
			break;
		}
		break;
	case SOF_DSP_PM_D1:
		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
		break;
	case SOF_DSP_PM_D2:
		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
		break;
	case SOF_DSP_PM_D3:
		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
		break;
	default:
		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
			sdev->dsp_power_state.state);
		break;
	}
}

/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_save() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
 */
static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
				   const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
				 const struct sof_dsp_power_state *target_state)
{
	/*
	 * When the DSP is already in D0I3 and the target state is D0I3,
	 * it could be the case that the DSP is in D0I3 during S0
	 * and the system is suspending to S0Ix. Therefore,
	 * hda_dsp_set_D0_state() must be called to disable trace DMA
	 * by sending the PM_GATE IPC to the FW.
	 */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
		return hda_dsp_set_power_state(sdev, target_state);

	/*
	 * For all other cases, return without doing anything if
	 * the DSP is already in the target state.
	 */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

	return hda_dsp_set_power_state(sdev, target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc3, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
				 const struct sof_dsp_power_state *target_state)
{
	/* Return without doing anything if the DSP is already in the target state */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

	return hda_dsp_set_power_state(sdev, target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc4, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * Audio DSP states may transition as below:
 *
 *                            Opportunistic D0I3 in S0
 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 *     suspend    |                     +--------------------+
 *   +------------+       D0I0(active)  |                    |
 *   |            |                     <---------------+    |
 *   |   +-------->                     |  New IPC      |    |
 *   |   |Runtime +--^--+---------^--+--+ (via mailbox) |    |
 *   |   |resume     |  |         |  |                  |    |
 *   |   |           |  |         |  |                  |    |
 *   |   |     System|  |         |  |                  |    |
 *   |   |     resume|  | S3/S0IX |  |                  |    |
 *   |   |           |  | suspend |  | S0IX             |    |
 *   |   |           |  |         |  |suspend           |    |
 *   |   |           |  |         |  |                  |    |
 *   |   |           |  |         |  |                  |    |
 * +-v---+-----------+--v-------+ |  |          +------+----v----+
 * |                            | |  +---------->                |
 * |       D3 (suspended)       | |              |      D0I3     |
 * |                            | +-------------->                |
 * |                            |  System resume |                |
 * +----------------------------+                +----------------+
 *
 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 *		 ignored the suspend trigger. Otherwise the DSP
 *		 is in D3.
 */
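
/*
 * Example paths through the diagram above: runtime PM moves the DSP
 * D0I0 -> D3 via hda_dsp_runtime_suspend() and back via
 * hda_dsp_runtime_resume(), while an idle DSP in D0I0 drops to D0I3 when
 * the delayed d0i3_work timeout fires and returns to D0I0 on the next IPC.
 */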
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	bool imr_lost = false;
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in states deeper
	 * than S3 on CAVS platforms.
	 * On ACE platforms, due to the system architecture, the IMR content
	 * is lost already in S3 state; they are tailored for s2idle use.
	 * We must not try IMR boot on next power up in these cases as it
	 * will fail.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
	     sdev->system_suspend_target == SOF_SUSPEND_S3))
		imr_lost = true;

	/*
	 * In case of firmware crash or boot failure set skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
{
	const struct sof_intel_dsp_desc *chip;
	int ret;

	/* display codec must be powered before link reset */
	hda_codec_i915_display_power(sdev, true);

	/*
	 * clear TCSEL to clear playback on some HD Audio
	 * codecs. PCI TCSEL is defined in the Intel manuals.
	 */
	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);

	/* reset and start hda controller */
	ret = hda_dsp_ctrl_init_chip(sdev);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to start controller after resume\n");
		goto cleanup;
	}

	/* check jack status */
	if (runtime_resume) {
		hda_codec_jack_wake_enable(sdev, false);
		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
			hda_codec_jack_check(sdev);
	}

	if (!sdev->dspless_mode_selected) {
		/* enable ppcap interrupt */
		hda_dsp_ctrl_ppcap_enable(sdev, true);
		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
	}

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0)
		hda_sdw_int_enable(sdev, true);

cleanup:
	/* display codec can be powered off after controller init */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}

int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if they were on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_resume, SND_SOC_SOF_INTEL_HDA_COMMON);
int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
	};
	int ret;

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_resume, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	struct hdac_bus *hbus = sof_to_bus(sdev);

	if (hbus->codec_powered) {
		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
			(unsigned int)hbus->codec_powered);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_idle, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D3,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_suspend, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
EXPORT_SYMBOL_NS(hda_dsp_suspend, SND_SOC_SOF_INTEL_HDA_COMMON);
static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
{
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_stream *s;
	unsigned int active_streams = 0;
	int sd_offset;
	u32 val;

	list_for_each_entry(s, &bus->stream_list, list) {
		sd_offset = SOF_STREAM_SD_OFFSET(s);
		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
				       sd_offset);
		if (val & SOF_HDA_SD_CTL_DMA_START)
			active_streams |= BIT(s->index);
	}

	return active_streams;
}

static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
{
	int ret;

	/*
	 * Do not assume a certain timing between the prior
	 * suspend flow and the running of this quirk function.
	 * The delay is needed if the controller was just put
	 * into reset before calling this function.
	 */
	usleep_range(500, 1000);

	/*
	 * Take controller out of reset to flush DMA
	 * transactions.
	 */
	ret = hda_dsp_ctrl_link_reset(sdev, false);
	if (ret < 0)
		return ret;

	usleep_range(500, 1000);

	/* Restore state for shutdown, back to reset */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0)
		return ret;

	return ret;
}

int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
{
	unsigned int active_streams;
	int ret, ret2;

	/* check if DMA cleanup has been successful */
	active_streams = hda_dsp_check_for_dma_streams(sdev);

	sdev->system_suspend_target = SOF_SUSPEND_S3;
	ret = snd_sof_suspend(sdev->dev);

	if (active_streams) {
		dev_warn(sdev->dev,
			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
			 active_streams);
		ret2 = hda_dsp_s5_quirk(sdev);
		if (ret2 < 0)
			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown_dma_flush, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	int ret;

	/* make sure all DAI resources are freed */
	ret = hda_dsp_dais_suspend(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_set_hw_params_upon_resume, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_dsp_d0i3_work(struct work_struct *work)
{
	struct sof_intel_hda_dev *hdev = container_of(work,
						      struct sof_intel_hda_dev,
						      d0i3_work.work);
	struct hdac_bus *bus = &hdev->hbus.core;
	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
	struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I3,
	};
	int ret;

	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
		/* remain in D0I0 */
		return;

	/* This can fail but error cannot be propagated */
	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
	if (ret < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: failed to set DSP state %d substate %d\n",
				    target_state.state, target_state.substate);
}
EXPORT_SYMBOL_NS(hda_dsp_d0i3_work, SND_SOC_SOF_INTEL_HDA_COMMON);
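
/*
 * Secondary-core bring-up, summarized from the function below: the core is
 * first powered up and started by the host; once firmware boot is complete,
 * secondary cores are additionally reported to the DSP through the IPC
 * set_core_state op so that firmware bookkeeping matches the hardware
 * state. If that notification fails, the core is powered back down.
 */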
int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
	int ret, ret1;

	/* power up core */
	ret = hda_dsp_enable_core(sdev, BIT(core));
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
			core, ret);
		return ret;
	}

	/* No need to send IPC for primary core or if FW boot is not complete */
	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
		return 0;

	/* No need to continue if the set_core_state op is not available */
	if (!pm_ops->set_core_state)
		return 0;

	/* Now notify DSP for secondary cores */
	ret = pm_ops->set_core_state(sdev, core, true);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to enable secondary core '%d' with %d\n",
			core, ret);
		goto power_down;
	}

	return ret;

power_down:
	/* power down core if it is host managed and return the original error if this fails too */
	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
	if (ret1 < 0)
		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_get, SND_SOC_SOF_INTEL_HDA_COMMON);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
{
	struct sof_intel_hda_dev *hdev;

	hdev = sdev->pdata->hw_pdata;

	if (!hdev->sdw)
		return;

	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
				HDA_DSP_REG_ADSPIC2_SNDW,
				enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
}
EXPORT_SYMBOL_NS(hda_common_enable_sdw_irq, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->enable_sdw_irq)
		chip->enable_sdw_irq(sdev, enable);
}
EXPORT_SYMBOL_NS(hda_sdw_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hdev;
	struct sdw_intel_ctx *ctx;
	u32 caps;

	hdev = sdev->pdata->hw_pdata;
	ctx = hdev->sdw;

	caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
	caps &= SDW_SHIM_LCAP_LCOUNT_MASK;

	/* Check HW supported vs property value */
	if (caps < ctx->count) {
		dev_err(sdev->dev,
			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
			__func__, ctx->count, caps);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount_common, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hdev;
	struct sdw_intel_ctx *ctx;
	struct hdac_bus *bus;
	u32 slcount;

	bus = sof_to_bus(sdev);

	hdev = sdev->pdata->hw_pdata;
	ctx = hdev->sdw;

	slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);

	/* Check HW supported vs property value */
	if (slcount < ctx->count) {
		dev_err(sdev->dev,
			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
			__func__, ctx->count, slcount);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount_ext, SND_SOC_SOF_INTEL_HDA_COMMON);
int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->read_sdw_lcount)
		return chip->read_sdw_lcount(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->sdw_process_wakeen)
		chip->sdw_process_wakeen(sdev);
}
EXPORT_SYMBOL_NS(hda_sdw_process_wakeen, SND_SOC_SOF_INTEL_HDA_COMMON);

#endif

int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_disable_interrupts, SND_SOC_SOF_INTEL_HDA_COMMON);

static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
	{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
	{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
	{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
	{HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
	{HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
	{HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
	{HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
	{HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
	{HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
	{HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
	{HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
	{HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
	{HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
	{HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
	{HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
	{HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
};

#define FSR_ROM_STATE_ENTRY(state)	{FSR_STATE_ROM_##state, #state}
static const struct hda_dsp_msg_code cavs_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
	/* CSE states */
	FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
	FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
	FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
	FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
	FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
};

static const struct hda_dsp_msg_code ace_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(RESET_VECTOR_DONE),
	FSR_ROM_STATE_ENTRY(PURGE_BOOT),
	FSR_ROM_STATE_ENTRY(RESTORE_BOOT),
	FSR_ROM_STATE_ENTRY(FW_ENTRY_POINT),
	FSR_ROM_STATE_ENTRY(VALIDATE_PUB_KEY),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_HPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_ULPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_UP_ULPSRAM_STACK),
	FSR_ROM_STATE_ENTRY(POWER_UP_HPSRAM_DMA),
	FSR_ROM_STATE_ENTRY(BEFORE_EP_POINTER_READ),
	FSR_ROM_STATE_ENTRY(VALIDATE_MANIFEST),
	FSR_ROM_STATE_ENTRY(VALIDATE_FW_MODULE),
	FSR_ROM_STATE_ENTRY(PROTECT_IMR_REGION),
	FSR_ROM_STATE_ENTRY(PUSH_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(PULL_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(VALIDATE_PKG_DIR),
	FSR_ROM_STATE_ENTRY(VALIDATE_CPD),
	FSR_ROM_STATE_ENTRY(VALIDATE_CSS_MAN_HEADER),
	FSR_ROM_STATE_ENTRY(VALIDATE_BLOB_SVN),
	FSR_ROM_STATE_ENTRY(VERIFY_IFWI_PARTITION),
	FSR_ROM_STATE_ENTRY(REMOVE_ACCESS_CONTROL),
	FSR_ROM_STATE_ENTRY(AUTH_BYPASS),
	FSR_ROM_STATE_ENTRY(AUTH_ENABLED),
	FSR_ROM_STATE_ENTRY(INIT_DMA),
	FSR_ROM_STATE_ENTRY(PURGE_FW_ENTRY),
	FSR_ROM_STATE_ENTRY(PURGE_FW_END),
	FSR_ROM_STATE_ENTRY(CLEAN_UP_BSS_DONE),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_ENTRY),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_END),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_IN_DMA_BUFF),
	FSR_ROM_STATE_ENTRY(LOAD_CSE_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_CODE_TO_IMR),
	FSR_ROM_STATE_ENTRY(FW_LOADING_DONE),
	FSR_ROM_STATE_ENTRY(FW_CODE_LOADED),
	FSR_ROM_STATE_ENTRY(VERIFY_IMAGE_TYPE),
	FSR_ROM_STATE_ENTRY(AUTH_API_INIT),
	FSR_ROM_STATE_ENTRY(AUTH_API_PROC),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_BUSY),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_RESULT),
	FSR_ROM_STATE_ENTRY(AUTH_API_CLEANUP),
};

#define FSR_BRINGUP_STATE_ENTRY(state)	{FSR_STATE_BRINGUP_##state, #state}
static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
	FSR_BRINGUP_STATE_ENTRY(INIT),
	FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
	FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
	FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
	FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
	FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
};

#define FSR_WAIT_STATE_ENTRY(state)	{FSR_WAIT_FOR_##state, #state}
static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
	FSR_WAIT_STATE_ENTRY(IPC_BUSY),
	FSR_WAIT_STATE_ENTRY(IPC_DONE),
	FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
	FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
	FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
	FSR_WAIT_STATE_ENTRY(CSE_CSR),
};

#define FSR_MODULE_NAME_ENTRY(mod)	[FSR_MOD_##mod] = #mod
static const char * const fsr_module_names[] = {
	FSR_MODULE_NAME_ENTRY(ROM),
	FSR_MODULE_NAME_ENTRY(ROM_BYP),
	FSR_MODULE_NAME_ENTRY(BASE_FW),
	FSR_MODULE_NAME_ENTRY(LP_BOOT),
	FSR_MODULE_NAME_ENTRY(BRNGUP),
	FSR_MODULE_NAME_ENTRY(ROM_EXT),
};

static const char *
hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
		       size_t array_size)
{
	int i;

	for (i = 0; i < array_size; i++) {
		if (code == msg_code[i].code)
			return msg_code[i].text;
	}

	return NULL;
}
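
/*
 * The firmware status register (FSR) decoded below packs several fields
 * into a single 32-bit value: a module code (ROM, bringup, base firmware,
 * ...), a module-specific state code, an optional "waiting for" code and
 * the FSR_HALTED flag; the FSR_TO_*() macros extract them. A separate
 * error/status code lives at rom_status_reg + 4.
 */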
void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
{
	const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
	const char *state_text, *error_text, *module_text;
	u32 fsr, state, wait_state, module, error_code;

	fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
	state = FSR_TO_STATE_CODE(fsr);
	wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
	module = FSR_TO_MODULE_CODE(fsr);

	if (module > FSR_MOD_ROM_EXT)
		module_text = "unknown";
	else
		module_text = fsr_module_names[module];

	if (module == FSR_MOD_BRNGUP) {
		state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
						    ARRAY_SIZE(fsr_bringup_state_names));
	} else {
		if (chip->hw_ip_version < SOF_INTEL_ACE_1_0)
			state_text = hda_dsp_get_state_text(state,
							cavs_fsr_rom_state_names,
							ARRAY_SIZE(cavs_fsr_rom_state_names));
		else
			state_text = hda_dsp_get_state_text(state,
							ace_fsr_rom_state_names,
							ARRAY_SIZE(ace_fsr_rom_state_names));
	}

	/* not for us, must be generic sof message */
	if (!state_text) {
		dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
		return;
	}

	if (wait_state) {
		const char *wait_state_text;

		wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
							 ARRAY_SIZE(fsr_wait_state_names));
		if (!wait_state_text)
			wait_state_text = "unknown";

		dev_printk(level, sdev->dev,
			   "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
			   fsr, module_text, state_text, wait_state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	} else {
		dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
			   fsr, module_text, state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	}

	error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
	if (!error_code)
		return;

	error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
					    ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
	if (!error_text)
		error_text = "unknown";

	if (state == FSR_STATE_FW_ENTERED)
		dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
			   error_text);
	else
		dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
			   error_text);
}
EXPORT_SYMBOL_NS(hda_dsp_get_state, SND_SOC_SOF_INTEL_HDA_COMMON);
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
				  struct sof_ipc_dsp_oops_xtensa *xoops,
				  struct sof_ipc_panic_info *panic_info,
				  u32 *stack, size_t stack_words)
{
	u32 offset = sdev->dsp_oops_offset;

	/* first read registers */
	sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));

	/* note: variable AR register array is not read */

	/* then get panic info */
	if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
		dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
			xoops->arch_hdr.totalsize);
		return;
	}
	offset += xoops->arch_hdr.totalsize;
	sof_block_read(sdev, sdev->mmio_bar, offset,
		       panic_info, sizeof(*panic_info));

	/* then get the stack */
	offset += sizeof(*panic_info);
	sof_block_read(sdev, sdev->mmio_bar, offset, stack,
		       stack_words * sizeof(u32));
}

/* dump the first 8 dwords representing the extended ROM status */
void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
				 u32 flags)
{
	const struct sof_intel_dsp_desc *chip;
	char msg[128];
	int len = 0;
	u32 value;
	int i;

	chip = get_chip_info(sdev->pdata);
	for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
		value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
		len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
	}

	dev_printk(level, sdev->dev, "extended rom status: %s", msg);
}

void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
	struct sof_ipc_dsp_oops_xtensa xoops;
	struct sof_ipc_panic_info panic_info;
	u32 stack[HDA_DSP_STACK_DUMP_SIZE];

	/* print ROM/FW status */
	hda_dsp_get_state(sdev, level);

	/* The firmware register dump is only available with IPC3 */
	if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC_TYPE_3) {
		u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
		u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);

		hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
				      HDA_DSP_STACK_DUMP_SIZE);
		sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
					 &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
	} else {
		hda_dsp_dump_ext_rom_status(sdev, level, flags);
	}
}
EXPORT_SYMBOL_NS(hda_dsp_dump, SND_SOC_SOF_INTEL_HDA_COMMON);