1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 // 3 // This file is provided under a dual BSD/GPLv2 license. When using or 4 // redistributing this file, you may do so under either license. 5 // 6 // Copyright(c) 2018 Intel Corporation 7 // 8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com> 9 // Ranjani Sridharan <ranjani.sridharan@linux.intel.com> 10 // Rander Wang <rander.wang@intel.com> 11 // Keyon Jie <yang.jie@linux.intel.com> 12 // 13 14 /* 15 * Hardware interface for generic Intel audio DSP HDA IP 16 */ 17 18 #include <linux/module.h> 19 #include <sound/hdaudio_ext.h> 20 #include <sound/hda_register.h> 21 #include <sound/hda-mlink.h> 22 #include <trace/events/sof_intel.h> 23 #include <sound/sof/xtensa.h> 24 #include "../sof-audio.h" 25 #include "../ops.h" 26 #include "hda.h" 27 #include "mtl.h" 28 #include "hda-ipc.h" 29 30 #define EXCEPT_MAX_HDR_SIZE 0x400 31 #define HDA_EXT_ROM_STATUS_SIZE 8 32 33 struct hda_dsp_msg_code { 34 u32 code; 35 const char *text; 36 }; 37 38 static bool hda_enable_trace_D0I3_S0; 39 #if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG) 40 module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444); 41 MODULE_PARM_DESC(enable_trace_D0I3_S0, 42 "SOF HDA enable trace when the DSP is in D0I3 in S0"); 43 #endif 44 45 static void hda_get_interfaces(struct snd_sof_dev *sdev, u32 *interface_mask) 46 { 47 const struct sof_intel_dsp_desc *chip; 48 49 chip = get_chip_info(sdev->pdata); 50 switch (chip->hw_ip_version) { 51 case SOF_INTEL_TANGIER: 52 case SOF_INTEL_BAYTRAIL: 53 case SOF_INTEL_BROADWELL: 54 interface_mask[SOF_DAI_DSP_ACCESS] = BIT(SOF_DAI_INTEL_SSP); 55 break; 56 case SOF_INTEL_CAVS_1_5: 57 case SOF_INTEL_CAVS_1_5_PLUS: 58 interface_mask[SOF_DAI_DSP_ACCESS] = 59 BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | BIT(SOF_DAI_INTEL_HDA); 60 interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA); 61 break; 62 case SOF_INTEL_CAVS_1_8: 63 case SOF_INTEL_CAVS_2_0: 64 case SOF_INTEL_CAVS_2_5: 65 case SOF_INTEL_ACE_1_0: 66 interface_mask[SOF_DAI_DSP_ACCESS] = 67 BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | 68 BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH); 69 interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA); 70 break; 71 case SOF_INTEL_ACE_2_0: 72 interface_mask[SOF_DAI_DSP_ACCESS] = 73 BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | 74 BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH); 75 /* all interfaces accessible without DSP */ 76 interface_mask[SOF_DAI_HOST_ACCESS] = 77 interface_mask[SOF_DAI_DSP_ACCESS]; 78 break; 79 default: 80 break; 81 } 82 } 83 84 u32 hda_get_interface_mask(struct snd_sof_dev *sdev) 85 { 86 u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 }; 87 88 hda_get_interfaces(sdev, interface_mask); 89 90 return interface_mask[sdev->dspless_mode_selected]; 91 } 92 EXPORT_SYMBOL_NS(hda_get_interface_mask, SND_SOC_SOF_INTEL_HDA_COMMON); 93 94 bool hda_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type) 95 { 96 u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 }; 97 const struct sof_intel_dsp_desc *chip; 98 99 if (sdev->dspless_mode_selected) 100 return false; 101 102 hda_get_interfaces(sdev, interface_mask); 103 104 if (!(interface_mask[SOF_DAI_DSP_ACCESS] & BIT(dai_type))) 105 return false; 106 107 if (dai_type == SOF_DAI_INTEL_HDA) 108 return true; 109 110 switch (dai_type) { 111 case SOF_DAI_INTEL_SSP: 112 case SOF_DAI_INTEL_DMIC: 113 case SOF_DAI_INTEL_ALH: 114 chip = get_chip_info(sdev->pdata); 115 if (chip->hw_ip_version < SOF_INTEL_ACE_2_0) 116 return false; 117 return true; 118 default: 119 return false; 120 } 121 } 122 EXPORT_SYMBOL_NS(hda_is_chain_dma_supported, SND_SOC_SOF_INTEL_HDA_COMMON); 123 124 /* 125 * DSP Core control. 126 */ 127 128 static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask) 129 { 130 u32 adspcs; 131 u32 reset; 132 int ret; 133 134 /* set reset bits for cores */ 135 reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask); 136 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 137 HDA_DSP_REG_ADSPCS, 138 reset, reset); 139 140 /* poll with timeout to check if operation successful */ 141 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 142 HDA_DSP_REG_ADSPCS, adspcs, 143 ((adspcs & reset) == reset), 144 HDA_DSP_REG_POLL_INTERVAL_US, 145 HDA_DSP_RESET_TIMEOUT_US); 146 if (ret < 0) { 147 dev_err(sdev->dev, 148 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n", 149 __func__); 150 return ret; 151 } 152 153 /* has core entered reset ? */ 154 adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, 155 HDA_DSP_REG_ADSPCS); 156 if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 157 HDA_DSP_ADSPCS_CRST_MASK(core_mask)) { 158 dev_err(sdev->dev, 159 "error: reset enter failed: core_mask %x adspcs 0x%x\n", 160 core_mask, adspcs); 161 ret = -EIO; 162 } 163 164 return ret; 165 } 166 167 static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask) 168 { 169 unsigned int crst; 170 u32 adspcs; 171 int ret; 172 173 /* clear reset bits for cores */ 174 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 175 HDA_DSP_REG_ADSPCS, 176 HDA_DSP_ADSPCS_CRST_MASK(core_mask), 177 0); 178 179 /* poll with timeout to check if operation successful */ 180 crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask); 181 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 182 HDA_DSP_REG_ADSPCS, adspcs, 183 !(adspcs & crst), 184 HDA_DSP_REG_POLL_INTERVAL_US, 185 HDA_DSP_RESET_TIMEOUT_US); 186 187 if (ret < 0) { 188 dev_err(sdev->dev, 189 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n", 190 __func__); 191 return ret; 192 } 193 194 /* has core left reset ? */ 195 adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, 196 HDA_DSP_REG_ADSPCS); 197 if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) { 198 dev_err(sdev->dev, 199 "error: reset leave failed: core_mask %x adspcs 0x%x\n", 200 core_mask, adspcs); 201 ret = -EIO; 202 } 203 204 return ret; 205 } 206 207 int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask) 208 { 209 /* stall core */ 210 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 211 HDA_DSP_REG_ADSPCS, 212 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask), 213 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)); 214 215 /* set reset state */ 216 return hda_dsp_core_reset_enter(sdev, core_mask); 217 } 218 EXPORT_SYMBOL_NS(hda_dsp_core_stall_reset, SND_SOC_SOF_INTEL_HDA_COMMON); 219 220 bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask) 221 { 222 int val; 223 bool is_enable; 224 225 val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS); 226 227 #define MASK_IS_EQUAL(v, m, field) ({ \ 228 u32 _m = field(m); \ 229 ((v) & _m) == _m; \ 230 }) 231 232 is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) && 233 MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) && 234 !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) && 235 !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)); 236 237 #undef MASK_IS_EQUAL 238 239 dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n", 240 is_enable, core_mask); 241 242 return is_enable; 243 } 244 EXPORT_SYMBOL_NS(hda_dsp_core_is_enabled, SND_SOC_SOF_INTEL_HDA_COMMON); 245 246 int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask) 247 { 248 int ret; 249 250 /* leave reset state */ 251 ret = hda_dsp_core_reset_leave(sdev, core_mask); 252 if (ret < 0) 253 return ret; 254 255 /* run core */ 256 dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask); 257 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 258 HDA_DSP_REG_ADSPCS, 259 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask), 260 0); 261 262 /* is core now running ? */ 263 if (!hda_dsp_core_is_enabled(sdev, core_mask)) { 264 hda_dsp_core_stall_reset(sdev, core_mask); 265 dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n", 266 core_mask); 267 ret = -EIO; 268 } 269 270 return ret; 271 } 272 EXPORT_SYMBOL_NS(hda_dsp_core_run, SND_SOC_SOF_INTEL_HDA_COMMON); 273 274 /* 275 * Power Management. 276 */ 277 278 int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask) 279 { 280 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 281 const struct sof_intel_dsp_desc *chip = hda->desc; 282 unsigned int cpa; 283 u32 adspcs; 284 int ret; 285 286 /* restrict core_mask to host managed cores mask */ 287 core_mask &= chip->host_managed_cores_mask; 288 /* return if core_mask is not valid */ 289 if (!core_mask) 290 return 0; 291 292 /* update bits */ 293 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS, 294 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 295 HDA_DSP_ADSPCS_SPA_MASK(core_mask)); 296 297 /* poll with timeout to check if operation successful */ 298 cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask); 299 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 300 HDA_DSP_REG_ADSPCS, adspcs, 301 (adspcs & cpa) == cpa, 302 HDA_DSP_REG_POLL_INTERVAL_US, 303 HDA_DSP_RESET_TIMEOUT_US); 304 if (ret < 0) { 305 dev_err(sdev->dev, 306 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n", 307 __func__); 308 return ret; 309 } 310 311 /* did core power up ? */ 312 adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, 313 HDA_DSP_REG_ADSPCS); 314 if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) != 315 HDA_DSP_ADSPCS_CPA_MASK(core_mask)) { 316 dev_err(sdev->dev, 317 "error: power up core failed core_mask %xadspcs 0x%x\n", 318 core_mask, adspcs); 319 ret = -EIO; 320 } 321 322 return ret; 323 } 324 EXPORT_SYMBOL_NS(hda_dsp_core_power_up, SND_SOC_SOF_INTEL_HDA_COMMON); 325 326 static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask) 327 { 328 u32 adspcs; 329 int ret; 330 331 /* update bits */ 332 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR, 333 HDA_DSP_REG_ADSPCS, 334 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0); 335 336 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, 337 HDA_DSP_REG_ADSPCS, adspcs, 338 !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)), 339 HDA_DSP_REG_POLL_INTERVAL_US, 340 HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC); 341 if (ret < 0) 342 dev_err(sdev->dev, 343 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n", 344 __func__); 345 346 return ret; 347 } 348 349 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask) 350 { 351 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 352 const struct sof_intel_dsp_desc *chip = hda->desc; 353 int ret; 354 355 /* restrict core_mask to host managed cores mask */ 356 core_mask &= chip->host_managed_cores_mask; 357 358 /* return if core_mask is not valid or cores are already enabled */ 359 if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask)) 360 return 0; 361 362 /* power up */ 363 ret = hda_dsp_core_power_up(sdev, core_mask); 364 if (ret < 0) { 365 dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n", 366 core_mask); 367 return ret; 368 } 369 370 return hda_dsp_core_run(sdev, core_mask); 371 } 372 EXPORT_SYMBOL_NS(hda_dsp_enable_core, SND_SOC_SOF_INTEL_HDA_COMMON); 373 374 int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev, 375 unsigned int core_mask) 376 { 377 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 378 const struct sof_intel_dsp_desc *chip = hda->desc; 379 int ret; 380 381 /* restrict core_mask to host managed cores mask */ 382 core_mask &= chip->host_managed_cores_mask; 383 384 /* return if core_mask is not valid */ 385 if (!core_mask) 386 return 0; 387 388 /* place core in reset prior to power down */ 389 ret = hda_dsp_core_stall_reset(sdev, core_mask); 390 if (ret < 0) { 391 dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n", 392 core_mask); 393 return ret; 394 } 395 396 /* power down core */ 397 ret = hda_dsp_core_power_down(sdev, core_mask); 398 if (ret < 0) { 399 dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n", 400 core_mask, ret); 401 return ret; 402 } 403 404 /* make sure we are in OFF state */ 405 if (hda_dsp_core_is_enabled(sdev, core_mask)) { 406 dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n", 407 core_mask, ret); 408 ret = -EIO; 409 } 410 411 return ret; 412 } 413 EXPORT_SYMBOL_NS(hda_dsp_core_reset_power_down, SND_SOC_SOF_INTEL_HDA_COMMON); 414 415 void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev) 416 { 417 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 418 const struct sof_intel_dsp_desc *chip = hda->desc; 419 420 if (sdev->dspless_mode_selected) 421 return; 422 423 /* enable IPC DONE and BUSY interrupts */ 424 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl, 425 HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY, 426 HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY); 427 428 /* enable IPC interrupt */ 429 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC, 430 HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC); 431 } 432 EXPORT_SYMBOL_NS(hda_dsp_ipc_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON); 433 434 void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev) 435 { 436 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 437 const struct sof_intel_dsp_desc *chip = hda->desc; 438 439 if (sdev->dspless_mode_selected) 440 return; 441 442 /* disable IPC interrupt */ 443 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC, 444 HDA_DSP_ADSPIC_IPC, 0); 445 446 /* disable IPC BUSY and DONE interrupt */ 447 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl, 448 HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0); 449 } 450 EXPORT_SYMBOL_NS(hda_dsp_ipc_int_disable, SND_SOC_SOF_INTEL_HDA_COMMON); 451 452 static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev) 453 { 454 int retry = HDA_DSP_REG_POLL_RETRY_COUNT; 455 struct snd_sof_pdata *pdata = sdev->pdata; 456 const struct sof_intel_dsp_desc *chip; 457 458 chip = get_chip_info(pdata); 459 while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) & 460 SOF_HDA_VS_D0I3C_CIP) { 461 if (!retry--) 462 return -ETIMEDOUT; 463 usleep_range(10, 15); 464 } 465 466 return 0; 467 } 468 469 static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags) 470 { 471 const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm); 472 473 if (pm_ops && pm_ops->set_pm_gate) 474 return pm_ops->set_pm_gate(sdev, flags); 475 476 return 0; 477 } 478 479 static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value) 480 { 481 struct snd_sof_pdata *pdata = sdev->pdata; 482 const struct sof_intel_dsp_desc *chip; 483 int ret; 484 u8 reg; 485 486 chip = get_chip_info(pdata); 487 488 /* Write to D0I3C after Command-In-Progress bit is cleared */ 489 ret = hda_dsp_wait_d0i3c_done(sdev); 490 if (ret < 0) { 491 dev_err(sdev->dev, "CIP timeout before D0I3C update!\n"); 492 return ret; 493 } 494 495 /* Update D0I3C register */ 496 snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset, 497 SOF_HDA_VS_D0I3C_I3, value); 498 499 /* 500 * The value written to the D0I3C::I3 bit may not be taken into account immediately. 501 * A delay is recommended before checking if D0I3C::CIP is cleared 502 */ 503 usleep_range(30, 40); 504 505 /* Wait for cmd in progress to be cleared before exiting the function */ 506 ret = hda_dsp_wait_d0i3c_done(sdev); 507 if (ret < 0) { 508 dev_err(sdev->dev, "CIP timeout after D0I3C update!\n"); 509 return ret; 510 } 511 512 reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset); 513 /* Confirm d0i3 state changed with paranoia check */ 514 if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) { 515 dev_err(sdev->dev, "failed to update D0I3C!\n"); 516 return -EIO; 517 } 518 519 trace_sof_intel_D0I3C_updated(sdev, reg); 520 521 return 0; 522 } 523 524 /* 525 * d0i3 streaming is enabled if all the active streams can 526 * work in d0i3 state and playback is enabled 527 */ 528 static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev) 529 { 530 struct snd_pcm_substream *substream; 531 struct snd_sof_pcm *spcm; 532 bool playback_active = false; 533 int dir; 534 535 list_for_each_entry(spcm, &sdev->pcm_list, list) { 536 for_each_pcm_streams(dir) { 537 substream = spcm->stream[dir].substream; 538 if (!substream || !substream->runtime) 539 continue; 540 541 if (!spcm->stream[dir].d0i3_compatible) 542 return false; 543 544 if (dir == SNDRV_PCM_STREAM_PLAYBACK) 545 playback_active = true; 546 } 547 } 548 549 return playback_active; 550 } 551 552 static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev, 553 const struct sof_dsp_power_state *target_state) 554 { 555 u32 flags = 0; 556 int ret; 557 u8 value = 0; 558 559 /* 560 * Sanity check for illegal state transitions 561 * The only allowed transitions are: 562 * 1. D3 -> D0I0 563 * 2. D0I0 -> D0I3 564 * 3. D0I3 -> D0I0 565 */ 566 switch (sdev->dsp_power_state.state) { 567 case SOF_DSP_PM_D0: 568 /* Follow the sequence below for D0 substate transitions */ 569 break; 570 case SOF_DSP_PM_D3: 571 /* Follow regular flow for D3 -> D0 transition */ 572 return 0; 573 default: 574 dev_err(sdev->dev, "error: transition from %d to %d not allowed\n", 575 sdev->dsp_power_state.state, target_state->state); 576 return -EINVAL; 577 } 578 579 /* Set flags and register value for D0 target substate */ 580 if (target_state->substate == SOF_HDA_DSP_PM_D0I3) { 581 value = SOF_HDA_VS_D0I3C_I3; 582 583 /* 584 * Trace DMA need to be disabled when the DSP enters 585 * D0I3 for S0Ix suspend, but it can be kept enabled 586 * when the DSP enters D0I3 while the system is in S0 587 * for debug purpose. 588 */ 589 if (!sdev->fw_trace_is_supported || 590 !hda_enable_trace_D0I3_S0 || 591 sdev->system_suspend_target != SOF_SUSPEND_NONE) 592 flags = HDA_PM_NO_DMA_TRACE; 593 594 if (hda_dsp_d0i3_streaming_applicable(sdev)) 595 flags |= HDA_PM_PG_STREAMING; 596 } else { 597 /* prevent power gating in D0I0 */ 598 flags = HDA_PM_PPG; 599 } 600 601 /* update D0I3C register */ 602 ret = hda_dsp_update_d0i3c_register(sdev, value); 603 if (ret < 0) 604 return ret; 605 606 /* 607 * Notify the DSP of the state change. 608 * If this IPC fails, revert the D0I3C register update in order 609 * to prevent partial state change. 610 */ 611 ret = hda_dsp_send_pm_gate_ipc(sdev, flags); 612 if (ret < 0) { 613 dev_err(sdev->dev, 614 "error: PM_GATE ipc error %d\n", ret); 615 goto revert; 616 } 617 618 return ret; 619 620 revert: 621 /* fallback to the previous register value */ 622 value = value ? 0 : SOF_HDA_VS_D0I3C_I3; 623 624 /* 625 * This can fail but return the IPC error to signal that 626 * the state change failed. 627 */ 628 hda_dsp_update_d0i3c_register(sdev, value); 629 630 return ret; 631 } 632 633 /* helper to log DSP state */ 634 static void hda_dsp_state_log(struct snd_sof_dev *sdev) 635 { 636 switch (sdev->dsp_power_state.state) { 637 case SOF_DSP_PM_D0: 638 switch (sdev->dsp_power_state.substate) { 639 case SOF_HDA_DSP_PM_D0I0: 640 dev_dbg(sdev->dev, "Current DSP power state: D0I0\n"); 641 break; 642 case SOF_HDA_DSP_PM_D0I3: 643 dev_dbg(sdev->dev, "Current DSP power state: D0I3\n"); 644 break; 645 default: 646 dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n", 647 sdev->dsp_power_state.substate); 648 break; 649 } 650 break; 651 case SOF_DSP_PM_D1: 652 dev_dbg(sdev->dev, "Current DSP power state: D1\n"); 653 break; 654 case SOF_DSP_PM_D2: 655 dev_dbg(sdev->dev, "Current DSP power state: D2\n"); 656 break; 657 case SOF_DSP_PM_D3: 658 dev_dbg(sdev->dev, "Current DSP power state: D3\n"); 659 break; 660 default: 661 dev_dbg(sdev->dev, "Unknown DSP power state: %d\n", 662 sdev->dsp_power_state.state); 663 break; 664 } 665 } 666 667 /* 668 * All DSP power state transitions are initiated by the driver. 669 * If the requested state change fails, the error is simply returned. 670 * Further state transitions are attempted only when the set_power_save() op 671 * is called again either because of a new IPC sent to the DSP or 672 * during system suspend/resume. 673 */ 674 static int hda_dsp_set_power_state(struct snd_sof_dev *sdev, 675 const struct sof_dsp_power_state *target_state) 676 { 677 int ret = 0; 678 679 switch (target_state->state) { 680 case SOF_DSP_PM_D0: 681 ret = hda_dsp_set_D0_state(sdev, target_state); 682 break; 683 case SOF_DSP_PM_D3: 684 /* The only allowed transition is: D0I0 -> D3 */ 685 if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 && 686 sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0) 687 break; 688 689 dev_err(sdev->dev, 690 "error: transition from %d to %d not allowed\n", 691 sdev->dsp_power_state.state, target_state->state); 692 return -EINVAL; 693 default: 694 dev_err(sdev->dev, "error: target state unsupported %d\n", 695 target_state->state); 696 return -EINVAL; 697 } 698 if (ret < 0) { 699 dev_err(sdev->dev, 700 "failed to set requested target DSP state %d substate %d\n", 701 target_state->state, target_state->substate); 702 return ret; 703 } 704 705 sdev->dsp_power_state = *target_state; 706 hda_dsp_state_log(sdev); 707 return ret; 708 } 709 710 int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev, 711 const struct sof_dsp_power_state *target_state) 712 { 713 /* 714 * When the DSP is already in D0I3 and the target state is D0I3, 715 * it could be the case that the DSP is in D0I3 during S0 716 * and the system is suspending to S0Ix. Therefore, 717 * hda_dsp_set_D0_state() must be called to disable trace DMA 718 * by sending the PM_GATE IPC to the FW. 719 */ 720 if (target_state->substate == SOF_HDA_DSP_PM_D0I3 && 721 sdev->system_suspend_target == SOF_SUSPEND_S0IX) 722 return hda_dsp_set_power_state(sdev, target_state); 723 724 /* 725 * For all other cases, return without doing anything if 726 * the DSP is already in the target state. 727 */ 728 if (target_state->state == sdev->dsp_power_state.state && 729 target_state->substate == sdev->dsp_power_state.substate) 730 return 0; 731 732 return hda_dsp_set_power_state(sdev, target_state); 733 } 734 EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc3, SND_SOC_SOF_INTEL_HDA_COMMON); 735 736 int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev, 737 const struct sof_dsp_power_state *target_state) 738 { 739 /* Return without doing anything if the DSP is already in the target state */ 740 if (target_state->state == sdev->dsp_power_state.state && 741 target_state->substate == sdev->dsp_power_state.substate) 742 return 0; 743 744 return hda_dsp_set_power_state(sdev, target_state); 745 } 746 EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc4, SND_SOC_SOF_INTEL_HDA_COMMON); 747 748 /* 749 * Audio DSP states may transform as below:- 750 * 751 * Opportunistic D0I3 in S0 752 * Runtime +---------------------+ Delayed D0i3 work timeout 753 * suspend | +--------------------+ 754 * +------------+ D0I0(active) | | 755 * | | <---------------+ | 756 * | +--------> | New IPC | | 757 * | |Runtime +--^--+---------^--+--+ (via mailbox) | | 758 * | |resume | | | | | | 759 * | | | | | | | | 760 * | | System| | | | | | 761 * | | resume| | S3/S0IX | | | | 762 * | | | | suspend | | S0IX | | 763 * | | | | | |suspend | | 764 * | | | | | | | | 765 * | | | | | | | | 766 * +-v---+-----------+--v-------+ | | +------+----v----+ 767 * | | | +-----------> | 768 * | D3 (suspended) | | | D0I3 | 769 * | | +--------------+ | 770 * | | System resume | | 771 * +----------------------------+ +----------------+ 772 * 773 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams 774 * ignored the suspend trigger. Otherwise the DSP 775 * is in D3. 776 */ 777 778 static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend) 779 { 780 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 781 const struct sof_intel_dsp_desc *chip = hda->desc; 782 struct hdac_bus *bus = sof_to_bus(sdev); 783 bool imr_lost = false; 784 int ret, j; 785 786 /* 787 * The memory used for IMR boot loses its content in deeper than S3 788 * state on CAVS platforms. 789 * On ACE platforms due to the system architecture the IMR content is 790 * lost at S3 state already, they are tailored for s2idle use. 791 * We must not try IMR boot on next power up in these cases as it will 792 * fail. 793 */ 794 if (sdev->system_suspend_target > SOF_SUSPEND_S3 || 795 (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 && 796 sdev->system_suspend_target == SOF_SUSPEND_S3)) 797 imr_lost = true; 798 799 /* 800 * In case of firmware crash or boot failure set the skip_imr_boot to true 801 * as well in order to try to re-load the firmware to do a 'cold' boot. 802 */ 803 if (imr_lost || sdev->fw_state == SOF_FW_CRASHED || 804 sdev->fw_state == SOF_FW_BOOT_FAILED) 805 hda->skip_imr_boot = true; 806 807 ret = chip->disable_interrupts(sdev); 808 if (ret < 0) 809 return ret; 810 811 /* make sure that no irq handler is pending before shutdown */ 812 synchronize_irq(sdev->ipc_irq); 813 814 hda_codec_jack_wake_enable(sdev, runtime_suspend); 815 816 /* power down all hda links */ 817 hda_bus_ml_suspend(bus); 818 819 if (sdev->dspless_mode_selected) 820 goto skip_dsp; 821 822 ret = chip->power_down_dsp(sdev); 823 if (ret < 0) { 824 dev_err(sdev->dev, "failed to power down DSP during suspend\n"); 825 return ret; 826 } 827 828 /* reset ref counts for all cores */ 829 for (j = 0; j < chip->cores_num; j++) 830 sdev->dsp_core_ref_count[j] = 0; 831 832 /* disable ppcap interrupt */ 833 hda_dsp_ctrl_ppcap_enable(sdev, false); 834 hda_dsp_ctrl_ppcap_int_enable(sdev, false); 835 skip_dsp: 836 837 /* disable hda bus irq and streams */ 838 hda_dsp_ctrl_stop_chip(sdev); 839 840 /* disable LP retention mode */ 841 snd_sof_pci_update_bits(sdev, PCI_PGCTL, 842 PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK); 843 844 /* reset controller */ 845 ret = hda_dsp_ctrl_link_reset(sdev, true); 846 if (ret < 0) { 847 dev_err(sdev->dev, 848 "error: failed to reset controller during suspend\n"); 849 return ret; 850 } 851 852 /* display codec can powered off after link reset */ 853 hda_codec_i915_display_power(sdev, false); 854 855 return 0; 856 } 857 858 static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume) 859 { 860 const struct sof_intel_dsp_desc *chip; 861 int ret; 862 863 /* display codec must be powered before link reset */ 864 hda_codec_i915_display_power(sdev, true); 865 866 /* 867 * clear TCSEL to clear playback on some HD Audio 868 * codecs. PCI TCSEL is defined in the Intel manuals. 869 */ 870 snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0); 871 872 /* reset and start hda controller */ 873 ret = hda_dsp_ctrl_init_chip(sdev); 874 if (ret < 0) { 875 dev_err(sdev->dev, 876 "error: failed to start controller after resume\n"); 877 goto cleanup; 878 } 879 880 /* check jack status */ 881 if (runtime_resume) { 882 hda_codec_jack_wake_enable(sdev, false); 883 if (sdev->system_suspend_target == SOF_SUSPEND_NONE) 884 hda_codec_jack_check(sdev); 885 } 886 887 if (!sdev->dspless_mode_selected) { 888 /* enable ppcap interrupt */ 889 hda_dsp_ctrl_ppcap_enable(sdev, true); 890 hda_dsp_ctrl_ppcap_int_enable(sdev, true); 891 } 892 893 chip = get_chip_info(sdev->pdata); 894 if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0) 895 hda_sdw_int_enable(sdev, true); 896 897 cleanup: 898 /* display codec can powered off after controller init */ 899 hda_codec_i915_display_power(sdev, false); 900 901 return 0; 902 } 903 904 int hda_dsp_resume(struct snd_sof_dev *sdev) 905 { 906 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 907 struct hdac_bus *bus = sof_to_bus(sdev); 908 struct pci_dev *pci = to_pci_dev(sdev->dev); 909 const struct sof_dsp_power_state target_state = { 910 .state = SOF_DSP_PM_D0, 911 .substate = SOF_HDA_DSP_PM_D0I0, 912 }; 913 int ret; 914 915 /* resume from D0I3 */ 916 if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) { 917 ret = hda_bus_ml_resume(bus); 918 if (ret < 0) { 919 dev_err(sdev->dev, 920 "error %d in %s: failed to power up links", 921 ret, __func__); 922 return ret; 923 } 924 925 /* set up CORB/RIRB buffers if was on before suspend */ 926 hda_codec_resume_cmd_io(sdev); 927 928 /* Set DSP power state */ 929 ret = snd_sof_dsp_set_power_state(sdev, &target_state); 930 if (ret < 0) { 931 dev_err(sdev->dev, "error: setting dsp state %d substate %d\n", 932 target_state.state, target_state.substate); 933 return ret; 934 } 935 936 /* restore L1SEN bit */ 937 if (hda->l1_disabled) 938 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, 939 HDA_VS_INTEL_EM2, 940 HDA_VS_INTEL_EM2_L1SEN, 0); 941 942 /* restore and disable the system wakeup */ 943 pci_restore_state(pci); 944 disable_irq_wake(pci->irq); 945 return 0; 946 } 947 948 /* init hda controller. DSP cores will be powered up during fw boot */ 949 ret = hda_resume(sdev, false); 950 if (ret < 0) 951 return ret; 952 953 return snd_sof_dsp_set_power_state(sdev, &target_state); 954 } 955 EXPORT_SYMBOL_NS(hda_dsp_resume, SND_SOC_SOF_INTEL_HDA_COMMON); 956 957 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev) 958 { 959 const struct sof_dsp_power_state target_state = { 960 .state = SOF_DSP_PM_D0, 961 }; 962 int ret; 963 964 /* init hda controller. DSP cores will be powered up during fw boot */ 965 ret = hda_resume(sdev, true); 966 if (ret < 0) 967 return ret; 968 969 return snd_sof_dsp_set_power_state(sdev, &target_state); 970 } 971 EXPORT_SYMBOL_NS(hda_dsp_runtime_resume, SND_SOC_SOF_INTEL_HDA_COMMON); 972 973 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev) 974 { 975 struct hdac_bus *hbus = sof_to_bus(sdev); 976 977 if (hbus->codec_powered) { 978 dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n", 979 (unsigned int)hbus->codec_powered); 980 return -EBUSY; 981 } 982 983 return 0; 984 } 985 EXPORT_SYMBOL_NS(hda_dsp_runtime_idle, SND_SOC_SOF_INTEL_HDA_COMMON); 986 987 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev) 988 { 989 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 990 const struct sof_dsp_power_state target_state = { 991 .state = SOF_DSP_PM_D3, 992 }; 993 int ret; 994 995 if (!sdev->dspless_mode_selected) { 996 /* cancel any attempt for DSP D0I3 */ 997 cancel_delayed_work_sync(&hda->d0i3_work); 998 } 999 1000 /* stop hda controller and power dsp off */ 1001 ret = hda_suspend(sdev, true); 1002 if (ret < 0) 1003 return ret; 1004 1005 return snd_sof_dsp_set_power_state(sdev, &target_state); 1006 } 1007 EXPORT_SYMBOL_NS(hda_dsp_runtime_suspend, SND_SOC_SOF_INTEL_HDA_COMMON); 1008 1009 int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state) 1010 { 1011 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 1012 struct hdac_bus *bus = sof_to_bus(sdev); 1013 struct pci_dev *pci = to_pci_dev(sdev->dev); 1014 const struct sof_dsp_power_state target_dsp_state = { 1015 .state = target_state, 1016 .substate = target_state == SOF_DSP_PM_D0 ? 1017 SOF_HDA_DSP_PM_D0I3 : 0, 1018 }; 1019 int ret; 1020 1021 if (!sdev->dspless_mode_selected) { 1022 /* cancel any attempt for DSP D0I3 */ 1023 cancel_delayed_work_sync(&hda->d0i3_work); 1024 } 1025 1026 if (target_state == SOF_DSP_PM_D0) { 1027 /* Set DSP power state */ 1028 ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state); 1029 if (ret < 0) { 1030 dev_err(sdev->dev, "error: setting dsp state %d substate %d\n", 1031 target_dsp_state.state, 1032 target_dsp_state.substate); 1033 return ret; 1034 } 1035 1036 /* enable L1SEN to make sure the system can enter S0Ix */ 1037 if (hda->l1_disabled) 1038 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2, 1039 HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN); 1040 1041 /* stop the CORB/RIRB DMA if it is On */ 1042 hda_codec_suspend_cmd_io(sdev); 1043 1044 /* no link can be powered in s0ix state */ 1045 ret = hda_bus_ml_suspend(bus); 1046 if (ret < 0) { 1047 dev_err(sdev->dev, 1048 "error %d in %s: failed to power down links", 1049 ret, __func__); 1050 return ret; 1051 } 1052 1053 /* enable the system waking up via IPC IRQ */ 1054 enable_irq_wake(pci->irq); 1055 pci_save_state(pci); 1056 return 0; 1057 } 1058 1059 /* stop hda controller and power dsp off */ 1060 ret = hda_suspend(sdev, false); 1061 if (ret < 0) { 1062 dev_err(bus->dev, "error: suspending dsp\n"); 1063 return ret; 1064 } 1065 1066 return snd_sof_dsp_set_power_state(sdev, &target_dsp_state); 1067 } 1068 EXPORT_SYMBOL_NS(hda_dsp_suspend, SND_SOC_SOF_INTEL_HDA_COMMON); 1069 1070 static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev) 1071 { 1072 struct hdac_bus *bus = sof_to_bus(sdev); 1073 struct hdac_stream *s; 1074 unsigned int active_streams = 0; 1075 int sd_offset; 1076 u32 val; 1077 1078 list_for_each_entry(s, &bus->stream_list, list) { 1079 sd_offset = SOF_STREAM_SD_OFFSET(s); 1080 val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, 1081 sd_offset); 1082 if (val & SOF_HDA_SD_CTL_DMA_START) 1083 active_streams |= BIT(s->index); 1084 } 1085 1086 return active_streams; 1087 } 1088 1089 static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev) 1090 { 1091 int ret; 1092 1093 /* 1094 * Do not assume a certain timing between the prior 1095 * suspend flow, and running of this quirk function. 1096 * This is needed if the controller was just put 1097 * to reset before calling this function. 1098 */ 1099 usleep_range(500, 1000); 1100 1101 /* 1102 * Take controller out of reset to flush DMA 1103 * transactions. 1104 */ 1105 ret = hda_dsp_ctrl_link_reset(sdev, false); 1106 if (ret < 0) 1107 return ret; 1108 1109 usleep_range(500, 1000); 1110 1111 /* Restore state for shutdown, back to reset */ 1112 ret = hda_dsp_ctrl_link_reset(sdev, true); 1113 if (ret < 0) 1114 return ret; 1115 1116 return ret; 1117 } 1118 1119 int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev) 1120 { 1121 unsigned int active_streams; 1122 int ret, ret2; 1123 1124 /* check if DMA cleanup has been successful */ 1125 active_streams = hda_dsp_check_for_dma_streams(sdev); 1126 1127 sdev->system_suspend_target = SOF_SUSPEND_S3; 1128 ret = snd_sof_suspend(sdev->dev); 1129 1130 if (active_streams) { 1131 dev_warn(sdev->dev, 1132 "There were active DSP streams (%#x) at shutdown, trying to recover\n", 1133 active_streams); 1134 ret2 = hda_dsp_s5_quirk(sdev); 1135 if (ret2 < 0) 1136 dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2); 1137 } 1138 1139 return ret; 1140 } 1141 EXPORT_SYMBOL_NS(hda_dsp_shutdown_dma_flush, SND_SOC_SOF_INTEL_HDA_COMMON); 1142 1143 int hda_dsp_shutdown(struct snd_sof_dev *sdev) 1144 { 1145 sdev->system_suspend_target = SOF_SUSPEND_S3; 1146 return snd_sof_suspend(sdev->dev); 1147 } 1148 EXPORT_SYMBOL_NS(hda_dsp_shutdown, SND_SOC_SOF_INTEL_HDA_COMMON); 1149 1150 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev) 1151 { 1152 int ret; 1153 1154 /* make sure all DAI resources are freed */ 1155 ret = hda_dsp_dais_suspend(sdev); 1156 if (ret < 0) 1157 dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__); 1158 1159 return ret; 1160 } 1161 EXPORT_SYMBOL_NS(hda_dsp_set_hw_params_upon_resume, SND_SOC_SOF_INTEL_HDA_COMMON); 1162 1163 void hda_dsp_d0i3_work(struct work_struct *work) 1164 { 1165 struct sof_intel_hda_dev *hdev = container_of(work, 1166 struct sof_intel_hda_dev, 1167 d0i3_work.work); 1168 struct hdac_bus *bus = &hdev->hbus.core; 1169 struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev); 1170 struct sof_dsp_power_state target_state = { 1171 .state = SOF_DSP_PM_D0, 1172 .substate = SOF_HDA_DSP_PM_D0I3, 1173 }; 1174 int ret; 1175 1176 /* DSP can enter D0I3 iff only D0I3-compatible streams are active */ 1177 if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev)) 1178 /* remain in D0I0 */ 1179 return; 1180 1181 /* This can fail but error cannot be propagated */ 1182 ret = snd_sof_dsp_set_power_state(sdev, &target_state); 1183 if (ret < 0) 1184 dev_err_ratelimited(sdev->dev, 1185 "error: failed to set DSP state %d substate %d\n", 1186 target_state.state, target_state.substate); 1187 } 1188 EXPORT_SYMBOL_NS(hda_dsp_d0i3_work, SND_SOC_SOF_INTEL_HDA_COMMON); 1189 1190 int hda_dsp_core_get(struct snd_sof_dev *sdev, int core) 1191 { 1192 const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm; 1193 int ret, ret1; 1194 1195 /* power up core */ 1196 ret = hda_dsp_enable_core(sdev, BIT(core)); 1197 if (ret < 0) { 1198 dev_err(sdev->dev, "failed to power up core %d with err: %d\n", 1199 core, ret); 1200 return ret; 1201 } 1202 1203 /* No need to send IPC for primary core or if FW boot is not complete */ 1204 if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE) 1205 return 0; 1206 1207 /* No need to continue the set_core_state ops is not available */ 1208 if (!pm_ops->set_core_state) 1209 return 0; 1210 1211 /* Now notify DSP for secondary cores */ 1212 ret = pm_ops->set_core_state(sdev, core, true); 1213 if (ret < 0) { 1214 dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n", 1215 core, ret); 1216 goto power_down; 1217 } 1218 1219 return ret; 1220 1221 power_down: 1222 /* power down core if it is host managed and return the original error if this fails too */ 1223 ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core)); 1224 if (ret1 < 0) 1225 dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1); 1226 1227 return ret; 1228 } 1229 EXPORT_SYMBOL_NS(hda_dsp_core_get, SND_SOC_SOF_INTEL_HDA_COMMON); 1230 1231 #if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) 1232 void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable) 1233 { 1234 struct sof_intel_hda_dev *hdev; 1235 1236 hdev = sdev->pdata->hw_pdata; 1237 1238 if (!hdev->sdw) 1239 return; 1240 1241 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2, 1242 HDA_DSP_REG_ADSPIC2_SNDW, 1243 enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0); 1244 } 1245 EXPORT_SYMBOL_NS(hda_common_enable_sdw_irq, SND_SOC_SOF_INTEL_HDA_COMMON); 1246 1247 void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable) 1248 { 1249 u32 interface_mask = hda_get_interface_mask(sdev); 1250 const struct sof_intel_dsp_desc *chip; 1251 1252 if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH))) 1253 return; 1254 1255 chip = get_chip_info(sdev->pdata); 1256 if (chip && chip->enable_sdw_irq) 1257 chip->enable_sdw_irq(sdev, enable); 1258 } 1259 EXPORT_SYMBOL_NS(hda_sdw_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON); 1260 1261 int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev) 1262 { 1263 struct sof_intel_hda_dev *hdev; 1264 struct sdw_intel_ctx *ctx; 1265 u32 caps; 1266 1267 hdev = sdev->pdata->hw_pdata; 1268 ctx = hdev->sdw; 1269 1270 caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP); 1271 caps &= SDW_SHIM_LCAP_LCOUNT_MASK; 1272 1273 /* Check HW supported vs property value */ 1274 if (caps < ctx->count) { 1275 dev_err(sdev->dev, 1276 "%s: BIOS master count %d is larger than hardware capabilities %d\n", 1277 __func__, ctx->count, caps); 1278 return -EINVAL; 1279 } 1280 1281 return 0; 1282 } 1283 EXPORT_SYMBOL_NS(hda_sdw_check_lcount_common, SND_SOC_SOF_INTEL_HDA_COMMON); 1284 1285 int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev) 1286 { 1287 struct sof_intel_hda_dev *hdev; 1288 struct sdw_intel_ctx *ctx; 1289 struct hdac_bus *bus; 1290 u32 slcount; 1291 1292 bus = sof_to_bus(sdev); 1293 1294 hdev = sdev->pdata->hw_pdata; 1295 ctx = hdev->sdw; 1296 1297 slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW); 1298 1299 /* Check HW supported vs property value */ 1300 if (slcount < ctx->count) { 1301 dev_err(sdev->dev, 1302 "%s: BIOS master count %d is larger than hardware capabilities %d\n", 1303 __func__, ctx->count, slcount); 1304 return -EINVAL; 1305 } 1306 1307 return 0; 1308 } 1309 EXPORT_SYMBOL_NS(hda_sdw_check_lcount_ext, SND_SOC_SOF_INTEL_HDA_COMMON); 1310 1311 int hda_sdw_check_lcount(struct snd_sof_dev *sdev) 1312 { 1313 const struct sof_intel_dsp_desc *chip; 1314 1315 chip = get_chip_info(sdev->pdata); 1316 if (chip && chip->read_sdw_lcount) 1317 return chip->read_sdw_lcount(sdev); 1318 1319 return 0; 1320 } 1321 EXPORT_SYMBOL_NS(hda_sdw_check_lcount, SND_SOC_SOF_INTEL_HDA_COMMON); 1322 1323 void hda_sdw_process_wakeen(struct snd_sof_dev *sdev) 1324 { 1325 u32 interface_mask = hda_get_interface_mask(sdev); 1326 const struct sof_intel_dsp_desc *chip; 1327 1328 if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH))) 1329 return; 1330 1331 chip = get_chip_info(sdev->pdata); 1332 if (chip && chip->sdw_process_wakeen) 1333 chip->sdw_process_wakeen(sdev); 1334 } 1335 EXPORT_SYMBOL_NS(hda_sdw_process_wakeen, SND_SOC_SOF_INTEL_HDA_COMMON); 1336 1337 #endif 1338 1339 int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev) 1340 { 1341 hda_sdw_int_enable(sdev, false); 1342 hda_dsp_ipc_int_disable(sdev); 1343 1344 return 0; 1345 } 1346 EXPORT_SYMBOL_NS(hda_dsp_disable_interrupts, SND_SOC_SOF_INTEL_HDA_COMMON); 1347 1348 static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = { 1349 {HDA_DSP_ROM_CSE_ERROR, "error: cse error"}, 1350 {HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"}, 1351 {HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"}, 1352 {HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"}, 1353 {HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"}, 1354 {HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"}, 1355 {HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"}, 1356 {HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"}, 1357 {HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"}, 1358 {HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"}, 1359 {HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"}, 1360 {HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"}, 1361 {HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"}, 1362 {HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"}, 1363 {HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"}, 1364 {HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"}, 1365 }; 1366 1367 #define FSR_ROM_STATE_ENTRY(state) {FSR_STATE_ROM_##state, #state} 1368 static const struct hda_dsp_msg_code cavs_fsr_rom_state_names[] = { 1369 FSR_ROM_STATE_ENTRY(INIT), 1370 FSR_ROM_STATE_ENTRY(INIT_DONE), 1371 FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED), 1372 FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED), 1373 FSR_ROM_STATE_ENTRY(FW_FW_LOADED), 1374 FSR_ROM_STATE_ENTRY(FW_ENTERED), 1375 FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK), 1376 FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET), 1377 FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT), 1378 FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE), 1379 /* CSE states */ 1380 FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST), 1381 FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED), 1382 FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST), 1383 FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED), 1384 FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT), 1385 FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1), 1386 FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY), 1387 FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL), 1388 FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN), 1389 }; 1390 1391 static const struct hda_dsp_msg_code ace_fsr_rom_state_names[] = { 1392 FSR_ROM_STATE_ENTRY(INIT), 1393 FSR_ROM_STATE_ENTRY(INIT_DONE), 1394 FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED), 1395 FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED), 1396 FSR_ROM_STATE_ENTRY(FW_FW_LOADED), 1397 FSR_ROM_STATE_ENTRY(FW_ENTERED), 1398 FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK), 1399 FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET), 1400 FSR_ROM_STATE_ENTRY(RESET_VECTOR_DONE), 1401 FSR_ROM_STATE_ENTRY(PURGE_BOOT), 1402 FSR_ROM_STATE_ENTRY(RESTORE_BOOT), 1403 FSR_ROM_STATE_ENTRY(FW_ENTRY_POINT), 1404 FSR_ROM_STATE_ENTRY(VALIDATE_PUB_KEY), 1405 FSR_ROM_STATE_ENTRY(POWER_DOWN_HPSRAM), 1406 FSR_ROM_STATE_ENTRY(POWER_DOWN_ULPSRAM), 1407 FSR_ROM_STATE_ENTRY(POWER_UP_ULPSRAM_STACK), 1408 FSR_ROM_STATE_ENTRY(POWER_UP_HPSRAM_DMA), 1409 FSR_ROM_STATE_ENTRY(BEFORE_EP_POINTER_READ), 1410 FSR_ROM_STATE_ENTRY(VALIDATE_MANIFEST), 1411 FSR_ROM_STATE_ENTRY(VALIDATE_FW_MODULE), 1412 FSR_ROM_STATE_ENTRY(PROTECT_IMR_REGION), 1413 FSR_ROM_STATE_ENTRY(PUSH_MODEL_ROUTINE), 1414 FSR_ROM_STATE_ENTRY(PULL_MODEL_ROUTINE), 1415 FSR_ROM_STATE_ENTRY(VALIDATE_PKG_DIR), 1416 FSR_ROM_STATE_ENTRY(VALIDATE_CPD), 1417 FSR_ROM_STATE_ENTRY(VALIDATE_CSS_MAN_HEADER), 1418 FSR_ROM_STATE_ENTRY(VALIDATE_BLOB_SVN), 1419 FSR_ROM_STATE_ENTRY(VERIFY_IFWI_PARTITION), 1420 FSR_ROM_STATE_ENTRY(REMOVE_ACCESS_CONTROL), 1421 FSR_ROM_STATE_ENTRY(AUTH_BYPASS), 1422 FSR_ROM_STATE_ENTRY(AUTH_ENABLED), 1423 FSR_ROM_STATE_ENTRY(INIT_DMA), 1424 FSR_ROM_STATE_ENTRY(PURGE_FW_ENTRY), 1425 FSR_ROM_STATE_ENTRY(PURGE_FW_END), 1426 FSR_ROM_STATE_ENTRY(CLEAN_UP_BSS_DONE), 1427 FSR_ROM_STATE_ENTRY(IMR_RESTORE_ENTRY), 1428 FSR_ROM_STATE_ENTRY(IMR_RESTORE_END), 1429 FSR_ROM_STATE_ENTRY(FW_MANIFEST_IN_DMA_BUFF), 1430 FSR_ROM_STATE_ENTRY(LOAD_CSE_MAN_TO_IMR), 1431 FSR_ROM_STATE_ENTRY(LOAD_FW_MAN_TO_IMR), 1432 FSR_ROM_STATE_ENTRY(LOAD_FW_CODE_TO_IMR), 1433 FSR_ROM_STATE_ENTRY(FW_LOADING_DONE), 1434 FSR_ROM_STATE_ENTRY(FW_CODE_LOADED), 1435 FSR_ROM_STATE_ENTRY(VERIFY_IMAGE_TYPE), 1436 FSR_ROM_STATE_ENTRY(AUTH_API_INIT), 1437 FSR_ROM_STATE_ENTRY(AUTH_API_PROC), 1438 FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_BUSY), 1439 FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_RESULT), 1440 FSR_ROM_STATE_ENTRY(AUTH_API_CLEANUP), 1441 }; 1442 1443 #define FSR_BRINGUP_STATE_ENTRY(state) {FSR_STATE_BRINGUP_##state, #state} 1444 static const struct hda_dsp_msg_code fsr_bringup_state_names[] = { 1445 FSR_BRINGUP_STATE_ENTRY(INIT), 1446 FSR_BRINGUP_STATE_ENTRY(INIT_DONE), 1447 FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD), 1448 FSR_BRINGUP_STATE_ENTRY(UNPACK_START), 1449 FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE), 1450 FSR_BRINGUP_STATE_ENTRY(FW_ENTERED), 1451 }; 1452 1453 #define FSR_WAIT_STATE_ENTRY(state) {FSR_WAIT_FOR_##state, #state} 1454 static const struct hda_dsp_msg_code fsr_wait_state_names[] = { 1455 FSR_WAIT_STATE_ENTRY(IPC_BUSY), 1456 FSR_WAIT_STATE_ENTRY(IPC_DONE), 1457 FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION), 1458 FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF), 1459 FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL), 1460 FSR_WAIT_STATE_ENTRY(CSE_CSR), 1461 }; 1462 1463 #define FSR_MODULE_NAME_ENTRY(mod) [FSR_MOD_##mod] = #mod 1464 static const char * const fsr_module_names[] = { 1465 FSR_MODULE_NAME_ENTRY(ROM), 1466 FSR_MODULE_NAME_ENTRY(ROM_BYP), 1467 FSR_MODULE_NAME_ENTRY(BASE_FW), 1468 FSR_MODULE_NAME_ENTRY(LP_BOOT), 1469 FSR_MODULE_NAME_ENTRY(BRNGUP), 1470 FSR_MODULE_NAME_ENTRY(ROM_EXT), 1471 }; 1472 1473 static const char * 1474 hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code, 1475 size_t array_size) 1476 { 1477 int i; 1478 1479 for (i = 0; i < array_size; i++) { 1480 if (code == msg_code[i].code) 1481 return msg_code[i].text; 1482 } 1483 1484 return NULL; 1485 } 1486 1487 void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level) 1488 { 1489 const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata); 1490 const char *state_text, *error_text, *module_text; 1491 u32 fsr, state, wait_state, module, error_code; 1492 1493 fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg); 1494 state = FSR_TO_STATE_CODE(fsr); 1495 wait_state = FSR_TO_WAIT_STATE_CODE(fsr); 1496 module = FSR_TO_MODULE_CODE(fsr); 1497 1498 if (module > FSR_MOD_ROM_EXT) 1499 module_text = "unknown"; 1500 else 1501 module_text = fsr_module_names[module]; 1502 1503 if (module == FSR_MOD_BRNGUP) { 1504 state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names, 1505 ARRAY_SIZE(fsr_bringup_state_names)); 1506 } else { 1507 if (chip->hw_ip_version < SOF_INTEL_ACE_1_0) 1508 state_text = hda_dsp_get_state_text(state, 1509 cavs_fsr_rom_state_names, 1510 ARRAY_SIZE(cavs_fsr_rom_state_names)); 1511 else 1512 state_text = hda_dsp_get_state_text(state, 1513 ace_fsr_rom_state_names, 1514 ARRAY_SIZE(ace_fsr_rom_state_names)); 1515 } 1516 1517 /* not for us, must be generic sof message */ 1518 if (!state_text) { 1519 dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr); 1520 return; 1521 } 1522 1523 if (wait_state) { 1524 const char *wait_state_text; 1525 1526 wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names, 1527 ARRAY_SIZE(fsr_wait_state_names)); 1528 if (!wait_state_text) 1529 wait_state_text = "unknown"; 1530 1531 dev_printk(level, sdev->dev, 1532 "%#010x: module: %s, state: %s, waiting for: %s, %s\n", 1533 fsr, module_text, state_text, wait_state_text, 1534 fsr & FSR_HALTED ? "not running" : "running"); 1535 } else { 1536 dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n", 1537 fsr, module_text, state_text, 1538 fsr & FSR_HALTED ? "not running" : "running"); 1539 } 1540 1541 error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4); 1542 if (!error_code) 1543 return; 1544 1545 error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts, 1546 ARRAY_SIZE(hda_dsp_rom_fw_error_texts)); 1547 if (!error_text) 1548 error_text = "unknown"; 1549 1550 if (state == FSR_STATE_FW_ENTERED) 1551 dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code, 1552 error_text); 1553 else 1554 dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code, 1555 error_text); 1556 } 1557 EXPORT_SYMBOL_NS(hda_dsp_get_state, SND_SOC_SOF_INTEL_HDA_COMMON); 1558 1559 static void hda_dsp_get_registers(struct snd_sof_dev *sdev, 1560 struct sof_ipc_dsp_oops_xtensa *xoops, 1561 struct sof_ipc_panic_info *panic_info, 1562 u32 *stack, size_t stack_words) 1563 { 1564 u32 offset = sdev->dsp_oops_offset; 1565 1566 /* first read registers */ 1567 sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops)); 1568 1569 /* note: variable AR register array is not read */ 1570 1571 /* then get panic info */ 1572 if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) { 1573 dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n", 1574 xoops->arch_hdr.totalsize); 1575 return; 1576 } 1577 offset += xoops->arch_hdr.totalsize; 1578 sof_block_read(sdev, sdev->mmio_bar, offset, 1579 panic_info, sizeof(*panic_info)); 1580 1581 /* then get the stack */ 1582 offset += sizeof(*panic_info); 1583 sof_block_read(sdev, sdev->mmio_bar, offset, stack, 1584 stack_words * sizeof(u32)); 1585 } 1586 1587 /* dump the first 8 dwords representing the extended ROM status */ 1588 void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level, 1589 u32 flags) 1590 { 1591 const struct sof_intel_dsp_desc *chip; 1592 char msg[128]; 1593 int len = 0; 1594 u32 value; 1595 int i; 1596 1597 chip = get_chip_info(sdev->pdata); 1598 for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) { 1599 value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4); 1600 len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value); 1601 } 1602 1603 dev_printk(level, sdev->dev, "extended rom status: %s", msg); 1604 1605 } 1606 1607 void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags) 1608 { 1609 char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR; 1610 struct sof_ipc_dsp_oops_xtensa xoops; 1611 struct sof_ipc_panic_info panic_info; 1612 u32 stack[HDA_DSP_STACK_DUMP_SIZE]; 1613 1614 /* print ROM/FW status */ 1615 hda_dsp_get_state(sdev, level); 1616 1617 /* The firmware register dump only available with IPC3 */ 1618 if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC_TYPE_3) { 1619 u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS); 1620 u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP); 1621 1622 hda_dsp_get_registers(sdev, &xoops, &panic_info, stack, 1623 HDA_DSP_STACK_DUMP_SIZE); 1624 sof_print_oops_and_stack(sdev, level, status, panic, &xoops, 1625 &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE); 1626 } else { 1627 hda_dsp_dump_ext_rom_status(sdev, level, flags); 1628 } 1629 } 1630 EXPORT_SYMBOL_NS(hda_dsp_dump, SND_SOC_SOF_INTEL_HDA_COMMON); 1631