// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//          Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//          Rander Wang <rander.wang@intel.com>
//          Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */

#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/hda-mlink.h>
#include <trace/events/sof_intel.h>
#include <sound/sof/xtensa.h>
#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
#include "mtl.h"
#include "hda-ipc.h"

#define EXCEPT_MAX_HDR_SIZE	0x400
#define HDA_EXT_ROM_STATUS_SIZE 8

struct hda_dsp_msg_code {
	u32 code;
	const char *text;
};

static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif
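/*
 * Illustrative usage (not part of the driver logic): with a debug build
 * (CONFIG_SND_SOC_SOF_DEBUG), the parameter can be set at module load time,
 * e.g.:
 *
 *	modprobe <module> enable_trace_D0I3_S0=1
 *
 * The exact module name depends on how this file is built and linked, so it
 * is left as a placeholder here. The parameter is read-only at runtime
 * (mode 0444).
 */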
static void hda_get_interfaces(struct snd_sof_dev *sdev, u32 *interface_mask)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	switch (chip->hw_ip_version) {
	case SOF_INTEL_TANGIER:
	case SOF_INTEL_BAYTRAIL:
	case SOF_INTEL_BROADWELL:
		interface_mask[SOF_DAI_DSP_ACCESS] = BIT(SOF_DAI_INTEL_SSP);
		break;
	case SOF_INTEL_CAVS_1_5:
	case SOF_INTEL_CAVS_1_5_PLUS:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | BIT(SOF_DAI_INTEL_HDA);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_CAVS_1_8:
	case SOF_INTEL_CAVS_2_0:
	case SOF_INTEL_CAVS_2_5:
	case SOF_INTEL_ACE_1_0:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
		break;
	case SOF_INTEL_ACE_2_0:
	case SOF_INTEL_ACE_3_0:
		interface_mask[SOF_DAI_DSP_ACCESS] =
			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
		/* all interfaces accessible without DSP */
		interface_mask[SOF_DAI_HOST_ACCESS] =
			interface_mask[SOF_DAI_DSP_ACCESS];
		break;
	default:
		break;
	}
}

u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
{
	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };

	hda_get_interfaces(sdev, interface_mask);

	return interface_mask[sdev->dspless_mode_selected];
}
EXPORT_SYMBOL_NS(hda_get_interface_mask, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type)
{
	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
	const struct sof_intel_dsp_desc *chip;

	if (sdev->dspless_mode_selected)
		return false;

	hda_get_interfaces(sdev, interface_mask);

	if (!(interface_mask[SOF_DAI_DSP_ACCESS] & BIT(dai_type)))
		return false;

	if (dai_type == SOF_DAI_INTEL_HDA)
		return true;

	switch (dai_type) {
	case SOF_DAI_INTEL_SSP:
	case SOF_DAI_INTEL_DMIC:
	case SOF_DAI_INTEL_ALH:
		chip = get_chip_info(sdev->pdata);
		if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
			return false;
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_NS(hda_is_chain_dma_supported, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * DSP Core control.
 */

static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    ((adspcs & reset) == reset),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return hda_dsp_core_reset_enter(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_core_stall_reset, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);

#define MASK_IS_EQUAL(v, m, field) ({	\
	u32 _m = field(m);		\
	((v) & _m) == _m;		\
})

	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

#undef MASK_IS_EQUAL

	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
		is_enable, core_mask);

	return is_enable;
}
EXPORT_SYMBOL_NS(hda_dsp_core_is_enabled, SND_SOC_SOF_INTEL_HDA_COMMON);
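/*
 * Note on the ADSPCS bit groups used above (per the cAVS/ACE register layout
 * as used throughout this file): CRST places a core in reset, CSTALL stalls
 * instruction fetch, SPA requests a power state ("set power active") and CPA
 * reflects the power state actually reached ("current power active"). A core
 * is therefore considered fully enabled only when SPA and CPA agree and both
 * CRST and CSTALL are clear, which is exactly what hda_dsp_core_is_enabled()
 * checks.
 */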
int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 0);

	/* is core now running ? */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_run, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * Power Management.
 */

int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int cpa;
	u32 adspcs;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;
	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* update bits */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
				HDA_DSP_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    (adspcs & cpa) == cpa,
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* did core power up ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: power up core failed core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_power_up, SND_SOC_SOF_INTEL_HDA_COMMON);
static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}

int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid or cores are already enabled */
	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
		return 0;

	/* power up */
	ret = hda_dsp_core_power_up(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	return hda_dsp_core_run(sdev, core_mask);
}
EXPORT_SYMBOL_NS(hda_dsp_enable_core, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
				  unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* place core in reset prior to power down */
	ret = hda_dsp_core_stall_reset(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	/* power down core */
	ret = hda_dsp_core_power_down(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
			core_mask, ret);
		return ret;
	}

	/* make sure we are in OFF state */
	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
			core_mask, ret);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_reset_power_down, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* enable IPC DONE and BUSY interrupts */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
				HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
				HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);

	/* enable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON);
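/*
 * Illustrative note (no new driver logic): the enable/disable helpers above
 * and below are meant to be used as a pair. The interrupt sources are enabled
 * once the firmware is ready to exchange IPCs and disabled on the way down,
 * e.g.:
 *
 *	hda_dsp_ipc_int_enable(sdev);	// after firmware boot
 *	...
 *	hda_dsp_ipc_int_disable(sdev);	// before suspend/removal
 *
 * In this file the disable side is reached through
 * hda_dsp_disable_interrupts(), which chip->disable_interrupts presumably
 * points to on several platforms (an assumption based on its use in
 * hda_suspend()).
 */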
void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	if (sdev->dspless_mode_selected)
		return;

	/* disable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, 0);

	/* disable IPC BUSY and DONE interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
				HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_int_disable, SND_SOC_SOF_INTEL_HDA_COMMON);

static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
{
	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(pdata);
	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
		SOF_HDA_VS_D0I3C_CIP) {
		if (!retry--)
			return -ETIMEDOUT;
		usleep_range(10, 15);
	}

	return 0;
}

static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
{
	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);

	if (pm_ops && pm_ops->set_pm_gate)
		return pm_ops->set_pm_gate(sdev, flags);

	return 0;
}

static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account
	 * immediately. A delay is recommended before checking if D0I3C::CIP is
	 * cleared.
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
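/*
 * Illustrative usage (mirrors hda_dsp_set_D0_state() below): the register is
 * written with the I3 bit set to request D0I3 and with 0 to return to D0I0:
 *
 *	ret = hda_dsp_update_d0i3c_register(sdev, SOF_HDA_VS_D0I3C_I3);
 *	...
 *	ret = hda_dsp_update_d0i3c_register(sdev, 0);
 */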
/*
 * d0i3 streaming is enabled if all the active streams can
 * work in d0i3 state and playback is enabled
 */
static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
{
	struct snd_pcm_substream *substream;
	struct snd_sof_pcm *spcm;
	bool playback_active = false;
	int dir;

	list_for_each_entry(spcm, &sdev->pcm_list, list) {
		for_each_pcm_streams(dir) {
			substream = spcm->stream[dir].substream;
			if (!substream || !substream->runtime)
				continue;

			if (!spcm->stream[dir].d0i3_compatible)
				return false;

			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
				playback_active = true;
		}
	}

	return playback_active;
}

static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA needs to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purposes.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
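/*
 * Summary of the PM_GATE flag combinations produced above (restating the
 * logic for reference, not adding new behavior):
 *
 *	target D0I0: HDA_PM_PPG (prevent power gating)
 *	target D0I3: HDA_PM_NO_DMA_TRACE, unless trace is supported and
 *		     explicitly kept alive in S0 via enable_trace_D0I3_S0;
 *		     plus HDA_PM_PG_STREAMING when all active streams are
 *		     D0I3-compatible and playback is running
 */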
/* helper to log DSP state */
static void hda_dsp_state_log(struct snd_sof_dev *sdev)
{
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		switch (sdev->dsp_power_state.substate) {
		case SOF_HDA_DSP_PM_D0I0:
			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
			break;
		case SOF_HDA_DSP_PM_D0I3:
			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
			break;
		default:
			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
				sdev->dsp_power_state.substate);
			break;
		}
		break;
	case SOF_DSP_PM_D1:
		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
		break;
	case SOF_DSP_PM_D2:
		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
		break;
	case SOF_DSP_PM_D3:
		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
		break;
	default:
		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
			sdev->dsp_power_state.state);
		break;
	}
}

/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_save() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
 */
static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
				   const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}

int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
				 const struct sof_dsp_power_state *target_state)
{
	/*
	 * When the DSP is already in D0I3 and the target state is D0I3,
	 * it could be the case that the DSP is in D0I3 during S0
	 * and the system is suspending to S0Ix. Therefore,
	 * hda_dsp_set_D0_state() must be called to disable trace DMA
	 * by sending the PM_GATE IPC to the FW.
	 */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
		return hda_dsp_set_power_state(sdev, target_state);

	/*
	 * For all other cases, return without doing anything if
	 * the DSP is already in the target state.
	 */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

	return hda_dsp_set_power_state(sdev, target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc3, SND_SOC_SOF_INTEL_HDA_COMMON);
int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
				 const struct sof_dsp_power_state *target_state)
{
	/* Return without doing anything if the DSP is already in the target state */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

	return hda_dsp_set_power_state(sdev, target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc4, SND_SOC_SOF_INTEL_HDA_COMMON);

/*
 * Audio DSP states may transform as below:-
 *
 *                                        Opportunistic D0I3 in S0
 *     Runtime    +---------------------+  Delayed D0i3 work timeout
 *     suspend    |                     +------------------------+
 *   +------------+      D0I0(active)   |                        |
 *   |            |                     <------------------+    |
 *   |   +------> |                        New IPC          |    |
 *   |   |Runtime +--^--+---------^--+--+ (via mailbox)     |    |
 *   |   |resume     |  |         |  |                      |    |
 *   |   |           |  |         |  |                      |    |
 *   |   |     System|  |         |  |                      |    |
 *   |   |     resume|  | S3/S0IX |  |                      |    |
 *   |   |           |  | suspend |  |        S0IX          |    |
 *   |   |           |  |         |  |suspend               |    |
 *   |   |           |  |         |  |                      |    |
 *   |   |           |  |         |  |                      |    |
 * +-v---+-----------+--v-------+ |  |          +------+----v----+
 * |                            | |  +--------->                 |
 * |       D3 (suspended)       | |             |   D0I3         |
 * |                            | +-------------+                |
 * |                            |  System resume|                |
 * +----------------------------+               +----------------+
 *
 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 *               ignored the suspend trigger. Otherwise the DSP
 *               is in D3.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	bool imr_lost = false;
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3
	 * state on CAVS platforms.
	 * On ACE platforms, due to the system architecture, the IMR content is
	 * lost already at S3 state, as these platforms are tailored for s2idle
	 * use.
	 * We must not try IMR boot on next power up in these cases as it will
	 * fail.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
	     sdev->system_suspend_target == SOF_SUSPEND_S3))
		imr_lost = true;

	/*
	 * In case of firmware crash or boot failure set skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
{
	int ret;

	/* display codec must be powered before link reset */
	hda_codec_i915_display_power(sdev, true);

	/*
	 * clear TCSEL to clear playback on some HD Audio
	 * codecs. PCI TCSEL is defined in the Intel manuals.
	 */
	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);

	/* reset and start hda controller */
	ret = hda_dsp_ctrl_init_chip(sdev);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to start controller after resume\n");
		goto cleanup;
	}

	/* check jack status */
	if (runtime_resume) {
		hda_codec_jack_wake_enable(sdev, false);
		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
			hda_codec_jack_check(sdev);
	}

	if (!sdev->dspless_mode_selected) {
		/* enable ppcap interrupt */
		hda_dsp_ctrl_ppcap_enable(sdev, true);
		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
	}

cleanup:
	/* display codec can be powered off after controller init */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}

int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if they were on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_resume, SND_SOC_SOF_INTEL_HDA_COMMON);
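/*
 * Note (assumption, for orientation only): hda_dsp_resume() above and
 * hda_dsp_runtime_resume(), hda_dsp_runtime_suspend() and hda_dsp_suspend()
 * below are intended to be wired into the platform's snd_sof_dsp_ops PM
 * callbacks (.resume, .runtime_resume, .runtime_suspend, .suspend); the
 * wiring itself lives outside this file.
 */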
int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
	};
	int ret;

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_resume, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	struct hdac_bus *hbus = sof_to_bus(sdev);

	if (hbus->codec_powered) {
		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
			(unsigned int)hbus->codec_powered);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_idle, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D3,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_runtime_suspend, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is on */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
EXPORT_SYMBOL_NS(hda_dsp_suspend, SND_SOC_SOF_INTEL_HDA_COMMON);
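/*
 * Illustrative note: hda_dsp_suspend() above is called with
 * target_state = SOF_DSP_PM_D0 when the system suspends to S0Ix while
 * D0I3-compatible streams keep the DSP in D0 (links stay configured and the
 * IPC IRQ becomes a wake source), and with SOF_DSP_PM_D3 for a full suspend
 * where the controller is stopped and the DSP powered off.
 */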
static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
{
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_stream *s;
	unsigned int active_streams = 0;
	int sd_offset;
	u32 val;

	list_for_each_entry(s, &bus->stream_list, list) {
		sd_offset = SOF_STREAM_SD_OFFSET(s);
		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
				       sd_offset);
		if (val & SOF_HDA_SD_CTL_DMA_START)
			active_streams |= BIT(s->index);
	}

	return active_streams;
}

static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
{
	int ret;

	/*
	 * Do not assume a certain timing between the prior
	 * suspend flow, and running of this quirk function.
	 * This is needed if the controller was just put
	 * to reset before calling this function.
	 */
	usleep_range(500, 1000);

	/*
	 * Take controller out of reset to flush DMA
	 * transactions.
	 */
	ret = hda_dsp_ctrl_link_reset(sdev, false);
	if (ret < 0)
		return ret;

	usleep_range(500, 1000);

	/* Restore state for shutdown, back to reset */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0)
		return ret;

	return ret;
}

int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
{
	unsigned int active_streams;
	int ret, ret2;

	/* check if DMA cleanup has been successful */
	active_streams = hda_dsp_check_for_dma_streams(sdev);

	sdev->system_suspend_target = SOF_SUSPEND_S3;
	ret = snd_sof_suspend(sdev->dev);

	if (active_streams) {
		dev_warn(sdev->dev,
			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
			 active_streams);
		ret2 = hda_dsp_s5_quirk(sdev);
		if (ret2 < 0)
			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown_dma_flush, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	int ret;

	/* make sure all DAI resources are freed */
	ret = hda_dsp_dais_suspend(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_set_hw_params_upon_resume, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_dsp_d0i3_work(struct work_struct *work)
{
	struct sof_intel_hda_dev *hdev = container_of(work,
						      struct sof_intel_hda_dev,
						      d0i3_work.work);
	struct hdac_bus *bus = &hdev->hbus.core;
	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
	struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I3,
	};
	int ret;

	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
		/* remain in D0I0 */
		return;

	/* This can fail but error cannot be propagated */
	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
	if (ret < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: failed to set DSP state %d substate %d\n",
				    target_state.state, target_state.substate);
}
EXPORT_SYMBOL_NS(hda_dsp_d0i3_work, SND_SOC_SOF_INTEL_HDA_COMMON);
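/*
 * Reference-counted core control: hda_dsp_core_get() below is the "get" half
 * used when a pipeline needs a DSP core. A matching put-side helper is
 * expected to power the core back down via hda_dsp_core_reset_power_down()
 * when the last user is gone (an assumption; the put side is not part of
 * this file).
 */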
int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
	int ret, ret1;

	/* power up core */
	ret = hda_dsp_enable_core(sdev, BIT(core));
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
			core, ret);
		return ret;
	}

	/* No need to send IPC for primary core or if FW boot is not complete */
	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
		return 0;

	/* No need to continue if the set_core_state op is not available */
	if (!pm_ops->set_core_state)
		return 0;

	/* Now notify DSP for secondary cores */
	ret = pm_ops->set_core_state(sdev, core, true);
	if (ret < 0) {
		dev_err(sdev->dev, "enabling secondary core '%d' failed with %d\n",
			core, ret);
		goto power_down;
	}

	return ret;

power_down:
	/* power down core if it is host managed and return the original error if this fails too */
	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
	if (ret1 < 0)
		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_get, SND_SOC_SOF_INTEL_HDA_COMMON);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
{
	struct sof_intel_hda_dev *hdev;

	hdev = sdev->pdata->hw_pdata;

	if (!hdev->sdw)
		return;

	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
				HDA_DSP_REG_ADSPIC2_SNDW,
				enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
}
EXPORT_SYMBOL_NS(hda_common_enable_sdw_irq, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->enable_sdw_irq)
		chip->enable_sdw_irq(sdev, enable);
}
EXPORT_SYMBOL_NS(hda_sdw_int_enable, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hdev;
	struct sdw_intel_ctx *ctx;
	u32 caps;

	hdev = sdev->pdata->hw_pdata;
	ctx = hdev->sdw;

	caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
	caps &= SDW_SHIM_LCAP_LCOUNT_MASK;

	/* Check HW supported vs property value */
	if (caps < ctx->count) {
		dev_err(sdev->dev,
			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
			__func__, ctx->count, caps);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount_common, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hdev;
	struct sdw_intel_ctx *ctx;
	struct hdac_bus *bus;
	u32 slcount;

	bus = sof_to_bus(sdev);

	hdev = sdev->pdata->hw_pdata;
	ctx = hdev->sdw;

	slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);

	/* Check HW supported vs property value */
	if (slcount < ctx->count) {
		dev_err(sdev->dev,
			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
			__func__, ctx->count, slcount);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount_ext, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->read_sdw_lcount)
		return chip->read_sdw_lcount(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_sdw_check_lcount, SND_SOC_SOF_INTEL_HDA_COMMON);
void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
{
	u32 interface_mask = hda_get_interface_mask(sdev);
	const struct sof_intel_dsp_desc *chip;

	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
		return;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->sdw_process_wakeen)
		chip->sdw_process_wakeen(sdev);
}
EXPORT_SYMBOL_NS(hda_sdw_process_wakeen, SND_SOC_SOF_INTEL_HDA_COMMON);

#endif

int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_disable_interrupts, SND_SOC_SOF_INTEL_HDA_COMMON);

static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
	{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
	{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
	{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
	{HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
	{HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
	{HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
	{HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
	{HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
	{HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
	{HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
	{HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
	{HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
	{HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
	{HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
	{HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
	{HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
};

#define FSR_ROM_STATE_ENTRY(state)	{FSR_STATE_ROM_##state, #state}
static const struct hda_dsp_msg_code cavs_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
	/* CSE states */
	FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
	FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
	FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
	FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
	FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
};
static const struct hda_dsp_msg_code ace_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(RESET_VECTOR_DONE),
	FSR_ROM_STATE_ENTRY(PURGE_BOOT),
	FSR_ROM_STATE_ENTRY(RESTORE_BOOT),
	FSR_ROM_STATE_ENTRY(FW_ENTRY_POINT),
	FSR_ROM_STATE_ENTRY(VALIDATE_PUB_KEY),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_HPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_ULPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_UP_ULPSRAM_STACK),
	FSR_ROM_STATE_ENTRY(POWER_UP_HPSRAM_DMA),
	FSR_ROM_STATE_ENTRY(BEFORE_EP_POINTER_READ),
	FSR_ROM_STATE_ENTRY(VALIDATE_MANIFEST),
	FSR_ROM_STATE_ENTRY(VALIDATE_FW_MODULE),
	FSR_ROM_STATE_ENTRY(PROTECT_IMR_REGION),
	FSR_ROM_STATE_ENTRY(PUSH_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(PULL_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(VALIDATE_PKG_DIR),
	FSR_ROM_STATE_ENTRY(VALIDATE_CPD),
	FSR_ROM_STATE_ENTRY(VALIDATE_CSS_MAN_HEADER),
	FSR_ROM_STATE_ENTRY(VALIDATE_BLOB_SVN),
	FSR_ROM_STATE_ENTRY(VERIFY_IFWI_PARTITION),
	FSR_ROM_STATE_ENTRY(REMOVE_ACCESS_CONTROL),
	FSR_ROM_STATE_ENTRY(AUTH_BYPASS),
	FSR_ROM_STATE_ENTRY(AUTH_ENABLED),
	FSR_ROM_STATE_ENTRY(INIT_DMA),
	FSR_ROM_STATE_ENTRY(PURGE_FW_ENTRY),
	FSR_ROM_STATE_ENTRY(PURGE_FW_END),
	FSR_ROM_STATE_ENTRY(CLEAN_UP_BSS_DONE),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_ENTRY),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_END),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_IN_DMA_BUFF),
	FSR_ROM_STATE_ENTRY(LOAD_CSE_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_CODE_TO_IMR),
	FSR_ROM_STATE_ENTRY(FW_LOADING_DONE),
	FSR_ROM_STATE_ENTRY(FW_CODE_LOADED),
	FSR_ROM_STATE_ENTRY(VERIFY_IMAGE_TYPE),
	FSR_ROM_STATE_ENTRY(AUTH_API_INIT),
	FSR_ROM_STATE_ENTRY(AUTH_API_PROC),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_BUSY),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_RESULT),
	FSR_ROM_STATE_ENTRY(AUTH_API_CLEANUP),
};

#define FSR_BRINGUP_STATE_ENTRY(state)	{FSR_STATE_BRINGUP_##state, #state}
static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
	FSR_BRINGUP_STATE_ENTRY(INIT),
	FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
	FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
	FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
	FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
	FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
};

#define FSR_WAIT_STATE_ENTRY(state)	{FSR_WAIT_FOR_##state, #state}
static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
	FSR_WAIT_STATE_ENTRY(IPC_BUSY),
	FSR_WAIT_STATE_ENTRY(IPC_DONE),
	FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
	FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
	FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
	FSR_WAIT_STATE_ENTRY(CSE_CSR),
};

#define FSR_MODULE_NAME_ENTRY(mod)	[FSR_MOD_##mod] = #mod
static const char * const fsr_module_names[] = {
	FSR_MODULE_NAME_ENTRY(ROM),
	FSR_MODULE_NAME_ENTRY(ROM_BYP),
	FSR_MODULE_NAME_ENTRY(BASE_FW),
	FSR_MODULE_NAME_ENTRY(LP_BOOT),
	FSR_MODULE_NAME_ENTRY(BRNGUP),
	FSR_MODULE_NAME_ENTRY(ROM_EXT),
};

static const char *
hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
		       size_t array_size)
{
	int i;

	for (i = 0; i < array_size; i++) {
		if (code == msg_code[i].code)
			return msg_code[i].text;
	}

	return NULL;
}
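/*
 * The ROM firmware status register (FSR) packs several fields: a module code,
 * a state code, an optional wait-state code and a HALTED flag. The
 * FSR_TO_MODULE_CODE()/FSR_TO_STATE_CODE()/FSR_TO_WAIT_STATE_CODE() macros
 * (defined in the platform headers) extract them; the tables above map the
 * extracted codes to human-readable names.
 */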
void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
{
	const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
	const char *state_text, *error_text, *module_text;
	u32 fsr, state, wait_state, module, error_code;

	fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
	state = FSR_TO_STATE_CODE(fsr);
	wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
	module = FSR_TO_MODULE_CODE(fsr);

	if (module > FSR_MOD_ROM_EXT)
		module_text = "unknown";
	else
		module_text = fsr_module_names[module];

	if (module == FSR_MOD_BRNGUP) {
		state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
						    ARRAY_SIZE(fsr_bringup_state_names));
	} else {
		if (chip->hw_ip_version < SOF_INTEL_ACE_1_0)
			state_text = hda_dsp_get_state_text(state,
							    cavs_fsr_rom_state_names,
							    ARRAY_SIZE(cavs_fsr_rom_state_names));
		else
			state_text = hda_dsp_get_state_text(state,
							    ace_fsr_rom_state_names,
							    ARRAY_SIZE(ace_fsr_rom_state_names));
	}

	/* not for us, must be generic sof message */
	if (!state_text) {
		dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
		return;
	}

	if (wait_state) {
		const char *wait_state_text;

		wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
							 ARRAY_SIZE(fsr_wait_state_names));
		if (!wait_state_text)
			wait_state_text = "unknown";

		dev_printk(level, sdev->dev,
			   "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
			   fsr, module_text, state_text, wait_state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	} else {
		dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
			   fsr, module_text, state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	}

	error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
	if (!error_code)
		return;

	error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
					    ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
	if (!error_text)
		error_text = "unknown";

	if (state == FSR_STATE_FW_ENTERED)
		dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
			   error_text);
	else
		dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
			   error_text);
}
EXPORT_SYMBOL_NS(hda_dsp_get_state, SND_SOC_SOF_INTEL_HDA_COMMON);
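/*
 * Memory layout assumed by hda_dsp_get_registers() below, as implied by its
 * reads: the xtensa oops block sits at sdev->dsp_oops_offset, followed
 * immediately by the panic info and then the stack dump:
 *
 *	[ sof_ipc_dsp_oops_xtensa ][ sof_ipc_panic_info ][ stack words ]
 */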
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
				  struct sof_ipc_dsp_oops_xtensa *xoops,
				  struct sof_ipc_panic_info *panic_info,
				  u32 *stack, size_t stack_words)
{
	u32 offset = sdev->dsp_oops_offset;

	/* first read registers */
	sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));

	/* note: variable AR register array is not read */

	/* then get panic info */
	if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
		dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
			xoops->arch_hdr.totalsize);
		return;
	}
	offset += xoops->arch_hdr.totalsize;
	sof_block_read(sdev, sdev->mmio_bar, offset,
		       panic_info, sizeof(*panic_info));

	/* then get the stack */
	offset += sizeof(*panic_info);
	sof_block_read(sdev, sdev->mmio_bar, offset, stack,
		       stack_words * sizeof(u32));
}

/* dump the first 8 dwords representing the extended ROM status */
void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
				 u32 flags)
{
	const struct sof_intel_dsp_desc *chip;
	char msg[128];
	int len = 0;
	u32 value;
	int i;

	chip = get_chip_info(sdev->pdata);
	for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
		value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
		len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
	}

	dev_printk(level, sdev->dev, "extended rom status: %s", msg);
}

void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
	struct sof_ipc_dsp_oops_xtensa xoops;
	struct sof_ipc_panic_info panic_info;
	u32 stack[HDA_DSP_STACK_DUMP_SIZE];

	/* print ROM/FW status */
	hda_dsp_get_state(sdev, level);

	/* The firmware register dump is only available with IPC3 */
	if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC_TYPE_3) {
		u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
		u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);

		hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
				      HDA_DSP_STACK_DUMP_SIZE);
		sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
					 &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
	} else {
		hda_dsp_dump_ext_rom_status(sdev, level, flags);
	}
}
EXPORT_SYMBOL_NS(hda_dsp_dump, SND_SOC_SOF_INTEL_HDA_COMMON);