xref: /linux/sound/soc/sof/intel/hda-dsp.c (revision c31f4aa8fed048fa70e742c4bb49bb48dc489ab3)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license.  When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation
7 //
8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 //	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
10 //	    Rander Wang <rander.wang@intel.com>
11 //          Keyon Jie <yang.jie@linux.intel.com>
12 //
13 
14 /*
15  * Hardware interface for generic Intel audio DSP HDA IP
16  */
17 
18 #include <linux/module.h>
19 #include <sound/hdaudio_ext.h>
20 #include <sound/hda_register.h>
21 #include <sound/hda-mlink.h>
22 #include <trace/events/sof_intel.h>
23 #include <sound/sof/xtensa.h>
24 #include "../sof-audio.h"
25 #include "../ops.h"
26 #include "hda.h"
27 #include "mtl.h"
28 #include "hda-ipc.h"
29 
30 #define EXCEPT_MAX_HDR_SIZE	0x400
31 #define HDA_EXT_ROM_STATUS_SIZE 8
32 
33 struct hda_dsp_msg_code {
34 	u32 code;
35 	const char *text;
36 };
37 
38 static bool hda_enable_trace_D0I3_S0;
39 #if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
40 module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
41 MODULE_PARM_DESC(enable_trace_D0I3_S0,
42 		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
43 #endif
44 
45 static void hda_get_interfaces(struct snd_sof_dev *sdev, u32 *interface_mask)
46 {
47 	const struct sof_intel_dsp_desc *chip;
48 
49 	chip = get_chip_info(sdev->pdata);
50 	switch (chip->hw_ip_version) {
51 	case SOF_INTEL_TANGIER:
52 	case SOF_INTEL_BAYTRAIL:
53 	case SOF_INTEL_BROADWELL:
54 		interface_mask[SOF_DAI_DSP_ACCESS] =  BIT(SOF_DAI_INTEL_SSP);
55 		break;
56 	case SOF_INTEL_CAVS_1_5:
57 	case SOF_INTEL_CAVS_1_5_PLUS:
58 		interface_mask[SOF_DAI_DSP_ACCESS] =
59 			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) | BIT(SOF_DAI_INTEL_HDA);
60 		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
61 		break;
62 	case SOF_INTEL_CAVS_1_8:
63 	case SOF_INTEL_CAVS_2_0:
64 	case SOF_INTEL_CAVS_2_5:
65 	case SOF_INTEL_ACE_1_0:
66 		interface_mask[SOF_DAI_DSP_ACCESS] =
67 			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
68 			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
69 		interface_mask[SOF_DAI_HOST_ACCESS] = BIT(SOF_DAI_INTEL_HDA);
70 		break;
71 	case SOF_INTEL_ACE_2_0:
72 	case SOF_INTEL_ACE_3_0:
73 	case SOF_INTEL_ACE_4_0:
74 		interface_mask[SOF_DAI_DSP_ACCESS] =
75 			BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
76 			BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
77 		 /* all interfaces accessible without DSP */
78 		interface_mask[SOF_DAI_HOST_ACCESS] =
79 			interface_mask[SOF_DAI_DSP_ACCESS];
80 		break;
81 	default:
82 		break;
83 	}
84 }
85 
86 u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
87 {
88 	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
89 
90 	hda_get_interfaces(sdev, interface_mask);
91 
92 	return interface_mask[sdev->dspless_mode_selected];
93 }
94 EXPORT_SYMBOL_NS(hda_get_interface_mask, "SND_SOC_SOF_INTEL_HDA_COMMON");
95 
96 bool hda_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type)
97 {
98 	u32 interface_mask[SOF_DAI_ACCESS_NUM] = { 0 };
99 	const struct sof_intel_dsp_desc *chip;
100 
101 	if (sdev->dspless_mode_selected)
102 		return false;
103 
104 	hda_get_interfaces(sdev, interface_mask);
105 
106 	if (!(interface_mask[SOF_DAI_DSP_ACCESS] & BIT(dai_type)))
107 		return false;
108 
109 	if (dai_type == SOF_DAI_INTEL_HDA)
110 		return true;
111 
112 	switch (dai_type) {
113 	case SOF_DAI_INTEL_SSP:
114 	case SOF_DAI_INTEL_DMIC:
115 	case SOF_DAI_INTEL_ALH:
116 		chip = get_chip_info(sdev->pdata);
117 		if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
118 			return false;
119 		return true;
120 	default:
121 		return false;
122 	}
123 }
124 EXPORT_SYMBOL_NS(hda_is_chain_dma_supported, "SND_SOC_SOF_INTEL_HDA_COMMON");
125 
126 /*
127  * DSP Core control.
128  */
129 
130 static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
131 {
132 	u32 adspcs;
133 	u32 reset;
134 	int ret;
135 
136 	/* set reset bits for cores */
137 	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
138 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
139 					 HDA_DSP_REG_ADSPCS,
140 					 reset, reset);
141 
142 	/* poll with timeout to check if operation successful */
143 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
144 					HDA_DSP_REG_ADSPCS, adspcs,
145 					((adspcs & reset) == reset),
146 					HDA_DSP_REG_POLL_INTERVAL_US,
147 					HDA_DSP_RESET_TIMEOUT_US);
148 	if (ret < 0) {
149 		dev_err(sdev->dev,
150 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
151 			__func__);
152 		return ret;
153 	}
154 
155 	/* has core entered reset ? */
156 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
157 				  HDA_DSP_REG_ADSPCS);
158 	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
159 		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
160 		dev_err(sdev->dev,
161 			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
162 			core_mask, adspcs);
163 		ret = -EIO;
164 	}
165 
166 	return ret;
167 }
168 
/*
 * hda_dsp_core_reset_leave - take the selected DSP cores out of reset
 * @sdev: SOF device
 * @core_mask: bitmask of cores to release from reset
 *
 * Clears the per-core CRST bits in ADSPCS, polls until the hardware reports
 * them cleared and then re-reads the register to confirm.
 *
 * Return: 0 on success, negative error code on timeout or if any core is
 * still in reset afterwards.
 */
static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? (paranoia re-read of ADSPCS) */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
208 
209 int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
210 {
211 	/* stall core */
212 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
213 					 HDA_DSP_REG_ADSPCS,
214 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
215 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
216 
217 	/* set reset state */
218 	return hda_dsp_core_reset_enter(sdev, core_mask);
219 }
220 EXPORT_SYMBOL_NS(hda_dsp_core_stall_reset, "SND_SOC_SOF_INTEL_HDA_COMMON");
221 
/*
 * hda_dsp_core_is_enabled - check whether the given cores are fully enabled
 * @sdev: SOF device
 * @core_mask: bitmask of cores to check
 *
 * A core counts as enabled when its power status (CPA) and power request
 * (SPA) bits are set while its reset (CRST) and stall (CSTALL) bits are
 * clear in ADSPCS.
 */
bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);

/* true if every bit of the field derived from 'm' is set in 'v' */
#define MASK_IS_EQUAL(v, m, field) ({	\
	u32 _m = field(m);		\
	((v) & _m) == _m;		\
})

	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

#undef MASK_IS_EQUAL

	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
		is_enable, core_mask);

	return is_enable;
}
EXPORT_SYMBOL_NS(hda_dsp_core_is_enabled, "SND_SOC_SOF_INTEL_HDA_COMMON");
247 
/*
 * hda_dsp_core_run - release cores from reset, unstall them and verify
 * @sdev: SOF device
 * @core_mask: bitmask of cores to start
 *
 * If the cores do not come up as enabled, they are stalled and put back
 * into reset before returning an error.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 0);

	/* is core now running ? if not, roll back by stalling/resetting it */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_run, "SND_SOC_SOF_INTEL_HDA_COMMON");
275 
276 /*
277  * Power Management.
278  */
279 
280 int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
281 {
282 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
283 	const struct sof_intel_dsp_desc *chip = hda->desc;
284 	unsigned int cpa;
285 	u32 adspcs;
286 	int ret;
287 
288 	/* restrict core_mask to host managed cores mask */
289 	core_mask &= chip->host_managed_cores_mask;
290 	/* return if core_mask is not valid */
291 	if (!core_mask)
292 		return 0;
293 
294 	/* update bits */
295 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
296 				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
297 				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
298 
299 	/* poll with timeout to check if operation successful */
300 	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
301 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
302 					    HDA_DSP_REG_ADSPCS, adspcs,
303 					    (adspcs & cpa) == cpa,
304 					    HDA_DSP_REG_POLL_INTERVAL_US,
305 					    HDA_DSP_RESET_TIMEOUT_US);
306 	if (ret < 0) {
307 		dev_err(sdev->dev,
308 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
309 			__func__);
310 		return ret;
311 	}
312 
313 	/* did core power up ? */
314 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
315 				  HDA_DSP_REG_ADSPCS);
316 	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
317 		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
318 		dev_err(sdev->dev,
319 			"error: power up core failed core_mask %xadspcs 0x%x\n",
320 			core_mask, adspcs);
321 		ret = -EIO;
322 	}
323 
324 	return ret;
325 }
326 EXPORT_SYMBOL_NS(hda_dsp_core_power_up, "SND_SOC_SOF_INTEL_HDA_COMMON");
327 
/*
 * hda_dsp_core_power_down - power down the given DSP cores
 * @sdev: SOF device
 * @core_mask: bitmask of cores to power down
 *
 * Clears the SPA bits and waits for the matching CPA bits to drop, using
 * the longer power-down budget (HDA_DSP_PD_TIMEOUT) rather than the reset
 * timeout.
 *
 * Return: 0 on success, negative error code on timeout.
 */
static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
				HDA_DSP_REG_ADSPCS, adspcs,
				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
				HDA_DSP_REG_POLL_INTERVAL_US,
				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}
350 
351 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
352 {
353 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
354 	const struct sof_intel_dsp_desc *chip = hda->desc;
355 	int ret;
356 
357 	/* restrict core_mask to host managed cores mask */
358 	core_mask &= chip->host_managed_cores_mask;
359 
360 	/* return if core_mask is not valid or cores are already enabled */
361 	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
362 		return 0;
363 
364 	/* power up */
365 	ret = hda_dsp_core_power_up(sdev, core_mask);
366 	if (ret < 0) {
367 		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
368 			core_mask);
369 		return ret;
370 	}
371 
372 	return hda_dsp_core_run(sdev, core_mask);
373 }
374 EXPORT_SYMBOL_NS(hda_dsp_enable_core, "SND_SOC_SOF_INTEL_HDA_COMMON");
375 
/*
 * hda_dsp_core_reset_power_down - stall, reset and power down DSP cores
 * @sdev: SOF device
 * @core_mask: bitmask of cores; restricted to host-managed cores
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
				  unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* place core in reset prior to power down */
	ret = hda_dsp_core_stall_reset(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	/* power down core */
	ret = hda_dsp_core_power_down(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
			core_mask, ret);
		return ret;
	}

	/* make sure we are in OFF state */
	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
		/* NOTE(review): ret is 0 here, so the %d in this message is
		 * always 0; it does not carry the failure reason.
		 */
		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
			core_mask, ret);
		ret = -EIO;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_core_reset_power_down, "SND_SOC_SOF_INTEL_HDA_COMMON");
416 
417 void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
418 {
419 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
420 	const struct sof_intel_dsp_desc *chip = hda->desc;
421 
422 	if (sdev->dspless_mode_selected)
423 		return;
424 
425 	/* enable IPC DONE and BUSY interrupts */
426 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
427 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
428 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
429 
430 	/* enable IPC interrupt */
431 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
432 				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
433 }
434 EXPORT_SYMBOL_NS(hda_dsp_ipc_int_enable, "SND_SOC_SOF_INTEL_HDA_COMMON");
435 
436 void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
437 {
438 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
439 	const struct sof_intel_dsp_desc *chip = hda->desc;
440 
441 	if (sdev->dspless_mode_selected)
442 		return;
443 
444 	/* disable IPC interrupt */
445 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
446 				HDA_DSP_ADSPIC_IPC, 0);
447 
448 	/* disable IPC BUSY and DONE interrupt */
449 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
450 			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
451 }
452 EXPORT_SYMBOL_NS(hda_dsp_ipc_int_disable, "SND_SOC_SOF_INTEL_HDA_COMMON");
453 
454 static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
455 {
456 	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
457 	struct snd_sof_pdata *pdata = sdev->pdata;
458 	const struct sof_intel_dsp_desc *chip;
459 
460 	chip = get_chip_info(pdata);
461 	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
462 		SOF_HDA_VS_D0I3C_CIP) {
463 		if (!retry--)
464 			return -ETIMEDOUT;
465 		usleep_range(10, 15);
466 	}
467 
468 	return 0;
469 }
470 
471 static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
472 {
473 	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
474 
475 	if (pm_ops && pm_ops->set_pm_gate)
476 		return pm_ops->set_pm_gate(sdev, flags);
477 
478 	return 0;
479 }
480 
/*
 * hda_dsp_update_d0i3c_register - set or clear the D0I3C::I3 bit
 * @sdev: SOF device
 * @value: SOF_HDA_VS_D0I3C_I3 to enter D0I3, 0 to leave it
 *
 * The write is only legal while the Command-In-Progress bit is clear, so
 * the function waits for CIP before and after the update and then verifies
 * that the I3 bit reached the requested value.
 *
 * Return: 0 on success, negative error code on CIP timeout or if the bit
 * did not change.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
525 
526 /*
527  * d0i3 streaming is enabled if all the active streams can
528  * work in d0i3 state and playback is enabled
529  */
530 static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
531 {
532 	struct snd_pcm_substream *substream;
533 	struct snd_sof_pcm *spcm;
534 	bool playback_active = false;
535 	int dir;
536 
537 	list_for_each_entry(spcm, &sdev->pcm_list, list) {
538 		for_each_pcm_streams(dir) {
539 			substream = spcm->stream[dir].substream;
540 			if (!substream || !substream->runtime)
541 				continue;
542 
543 			if (!spcm->stream[dir].d0i3_compatible)
544 				return false;
545 
546 			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
547 				playback_active = true;
548 		}
549 	}
550 
551 	return playback_active;
552 }
553 
/*
 * hda_dsp_set_D0_state - move the DSP between D0 substates (D0I0/D0I3)
 * @sdev: SOF device
 * @target_state: requested power state (state must be SOF_DSP_PM_D0)
 *
 * Updates the D0I3C register and then notifies the firmware via the
 * PM_GATE IPC. If the IPC fails, the register update is reverted so the
 * driver does not end up in a partially changed state.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA need to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purpose.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
634 
635 /* helper to log DSP state */
636 static void hda_dsp_state_log(struct snd_sof_dev *sdev)
637 {
638 	switch (sdev->dsp_power_state.state) {
639 	case SOF_DSP_PM_D0:
640 		switch (sdev->dsp_power_state.substate) {
641 		case SOF_HDA_DSP_PM_D0I0:
642 			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
643 			break;
644 		case SOF_HDA_DSP_PM_D0I3:
645 			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
646 			break;
647 		default:
648 			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
649 				sdev->dsp_power_state.substate);
650 			break;
651 		}
652 		break;
653 	case SOF_DSP_PM_D1:
654 		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
655 		break;
656 	case SOF_DSP_PM_D2:
657 		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
658 		break;
659 	case SOF_DSP_PM_D3:
660 		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
661 		break;
662 	default:
663 		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
664 			sdev->dsp_power_state.state);
665 		break;
666 	}
667 }
668 
669 /*
670  * All DSP power state transitions are initiated by the driver.
671  * If the requested state change fails, the error is simply returned.
672  * Further state transitions are attempted only when the set_power_save() op
673  * is called again either because of a new IPC sent to the DSP or
674  * during system suspend/resume.
675  */
676 static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
677 				   const struct sof_dsp_power_state *target_state)
678 {
679 	int ret = 0;
680 
681 	switch (target_state->state) {
682 	case SOF_DSP_PM_D0:
683 		ret = hda_dsp_set_D0_state(sdev, target_state);
684 		break;
685 	case SOF_DSP_PM_D3:
686 		/* The only allowed transition is: D0I0 -> D3 */
687 		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
688 		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
689 			break;
690 
691 		dev_err(sdev->dev,
692 			"error: transition from %d to %d not allowed\n",
693 			sdev->dsp_power_state.state, target_state->state);
694 		return -EINVAL;
695 	default:
696 		dev_err(sdev->dev, "error: target state unsupported %d\n",
697 			target_state->state);
698 		return -EINVAL;
699 	}
700 	if (ret < 0) {
701 		dev_err(sdev->dev,
702 			"failed to set requested target DSP state %d substate %d\n",
703 			target_state->state, target_state->substate);
704 		return ret;
705 	}
706 
707 	sdev->dsp_power_state = *target_state;
708 	hda_dsp_state_log(sdev);
709 	return ret;
710 }
711 
712 int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
713 				 const struct sof_dsp_power_state *target_state)
714 {
715 	/*
716 	 * When the DSP is already in D0I3 and the target state is D0I3,
717 	 * it could be the case that the DSP is in D0I3 during S0
718 	 * and the system is suspending to S0Ix. Therefore,
719 	 * hda_dsp_set_D0_state() must be called to disable trace DMA
720 	 * by sending the PM_GATE IPC to the FW.
721 	 */
722 	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
723 	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
724 		return hda_dsp_set_power_state(sdev, target_state);
725 
726 	/*
727 	 * For all other cases, return without doing anything if
728 	 * the DSP is already in the target state.
729 	 */
730 	if (target_state->state == sdev->dsp_power_state.state &&
731 	    target_state->substate == sdev->dsp_power_state.substate)
732 		return 0;
733 
734 	return hda_dsp_set_power_state(sdev, target_state);
735 }
736 EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc3, "SND_SOC_SOF_INTEL_HDA_COMMON");
737 
738 int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
739 				 const struct sof_dsp_power_state *target_state)
740 {
741 	/* Return without doing anything if the DSP is already in the target state */
742 	if (target_state->state == sdev->dsp_power_state.state &&
743 	    target_state->substate == sdev->dsp_power_state.substate)
744 		return 0;
745 
746 	return hda_dsp_set_power_state(sdev, target_state);
747 }
748 EXPORT_SYMBOL_NS(hda_dsp_set_power_state_ipc4, "SND_SOC_SOF_INTEL_HDA_COMMON");
749 
750 /*
751  * Audio DSP states may transform as below:-
752  *
753  *                                         Opportunistic D0I3 in S0
754  *     Runtime    +---------------------+  Delayed D0i3 work timeout
755  *     suspend    |                     +--------------------+
756  *   +------------+       D0I0(active)  |                    |
757  *   |            |                     <---------------+    |
758  *   |   +-------->                     |    New IPC	|    |
759  *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
760  *   |   |resume     |  |         |  |			|    |
761  *   |   |           |  |         |  |			|    |
762  *   |   |     System|  |         |  |			|    |
763  *   |   |     resume|  | S3/S0IX |  |                  |    |
764  *   |   |	     |  | suspend |  | S0IX             |    |
765  *   |   |           |  |         |  |suspend           |    |
766  *   |   |           |  |         |  |                  |    |
767  *   |   |           |  |         |  |                  |    |
768  * +-v---+-----------+--v-------+ |  |           +------+----v----+
769  * |                            | |  +----------->                |
770  * |       D3 (suspended)       | |              |      D0I3      |
771  * |                            | +--------------+                |
772  * |                            |  System resume |                |
773  * +----------------------------+		 +----------------+
774  *
775  * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
776  *		 ignored the suspend trigger. Otherwise the DSP
777  *		 is in D3.
778  */
779 
/*
 * hda_suspend - common suspend path: quiesce the DSP and the HDA controller
 * @sdev: SOF device
 * @runtime_suspend: true for runtime PM suspend, false for system suspend
 *
 * Disables interrupts, powers down the DSP (unless in DSP-less mode),
 * stops the controller, disables LP retention and resets the link.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	bool imr_lost = false;
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3
	 * state on CAVS platforms.
	 * On ACE platforms due to the system architecture the IMR content is
	 * lost at S3 state already, they are tailored for s2idle use.
	 * We must not try IMR boot on next power up in these cases as it will
	 * fail.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
	     sdev->system_suspend_target == SOF_SUSPEND_S3))
		imr_lost = true;

	/*
	 * In case of firmware crash or boot failure set the skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	/* in DSP-less mode there are no DSP cores or ppcap to tear down */
	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
859 
860 static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
861 {
862 	int ret;
863 
864 	/* display codec must be powered before link reset */
865 	hda_codec_i915_display_power(sdev, true);
866 
867 	/*
868 	 * clear TCSEL to clear playback on some HD Audio
869 	 * codecs. PCI TCSEL is defined in the Intel manuals.
870 	 */
871 	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
872 
873 	/* reset and start hda controller */
874 	ret = hda_dsp_ctrl_init_chip(sdev, false);
875 	if (ret < 0) {
876 		dev_err(sdev->dev,
877 			"error: failed to start controller after resume\n");
878 		goto cleanup;
879 	}
880 
881 	/* check jack status */
882 	if (runtime_resume) {
883 		hda_codec_jack_wake_enable(sdev, false);
884 		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
885 			hda_codec_jack_check(sdev);
886 	}
887 
888 	if (!sdev->dspless_mode_selected) {
889 		/* enable ppcap interrupt */
890 		hda_dsp_ctrl_ppcap_enable(sdev, true);
891 		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
892 	}
893 
894 cleanup:
895 	/* display codec can powered off after controller init */
896 	hda_codec_i915_display_power(sdev, false);
897 
898 	return 0;
899 }
900 
/*
 * hda_dsp_resume - system resume handler
 * @sdev: SOF device
 *
 * Two paths: if the DSP stayed in D0 (D0I3 during S0Ix) only the links and
 * the DSP power substate are restored; otherwise the controller is fully
 * re-initialized (DSP cores are powered up later during fw boot).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
EXPORT_SYMBOL_NS(hda_dsp_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
953 
954 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
955 {
956 	const struct sof_dsp_power_state target_state = {
957 		.state = SOF_DSP_PM_D0,
958 	};
959 	int ret;
960 
961 	/* init hda controller. DSP cores will be powered up during fw boot */
962 	ret = hda_resume(sdev, true);
963 	if (ret < 0)
964 		return ret;
965 
966 	return snd_sof_dsp_set_power_state(sdev, &target_state);
967 }
968 EXPORT_SYMBOL_NS(hda_dsp_runtime_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
969 
970 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
971 {
972 	struct hdac_bus *hbus = sof_to_bus(sdev);
973 
974 	if (hbus->codec_powered) {
975 		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
976 			(unsigned int)hbus->codec_powered);
977 		return -EBUSY;
978 	}
979 
980 	return 0;
981 }
982 EXPORT_SYMBOL_NS(hda_dsp_runtime_idle, "SND_SOC_SOF_INTEL_HDA_COMMON");
983 
984 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
985 {
986 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
987 	const struct sof_dsp_power_state target_state = {
988 		.state = SOF_DSP_PM_D3,
989 	};
990 	int ret;
991 
992 	if (!sdev->dspless_mode_selected) {
993 		/* cancel any attempt for DSP D0I3 */
994 		cancel_delayed_work_sync(&hda->d0i3_work);
995 
996 		/* Cancel the microphone privacy work if mic privacy is active */
997 		if (hda->mic_privacy.active)
998 			cancel_work_sync(&hda->mic_privacy.work);
999 	}
1000 
1001 	/* stop hda controller and power dsp off */
1002 	ret = hda_suspend(sdev, true);
1003 	if (ret < 0)
1004 		return ret;
1005 
1006 	return snd_sof_dsp_set_power_state(sdev, &target_state);
1007 }
1008 EXPORT_SYMBOL_NS(hda_dsp_runtime_suspend, "SND_SOC_SOF_INTEL_HDA_COMMON");
1009 
/*
 * System suspend handler.
 *
 * For a SOF_DSP_PM_D0 target (S0ix with the DSP left in D0I3) the
 * controller stays alive: the DSP substate is updated, L1SEN is
 * re-enabled if it was disabled, CORB/RIRB DMA is stopped, the
 * multi-links are powered down and the IPC IRQ is armed as a system
 * wakeup source. For any other target the controller is fully stopped
 * and the DSP is powered off.
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		/* D0 target implies the D0I3 substate; otherwise no substate */
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);

		/* Cancel the microphone privacy work if mic privacy is active */
		if (hda->mic_privacy.active)
			cancel_work_sync(&hda->mic_privacy.work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
EXPORT_SYMBOL_NS(hda_dsp_suspend, "SND_SOC_SOF_INTEL_HDA_COMMON");
1074 
1075 static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
1076 {
1077 	struct hdac_bus *bus = sof_to_bus(sdev);
1078 	struct hdac_stream *s;
1079 	unsigned int active_streams = 0;
1080 	int sd_offset;
1081 	u32 val;
1082 
1083 	list_for_each_entry(s, &bus->stream_list, list) {
1084 		sd_offset = SOF_STREAM_SD_OFFSET(s);
1085 		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
1086 				       sd_offset);
1087 		if (val & SOF_HDA_SD_CTL_DMA_START)
1088 			active_streams |= BIT(s->index);
1089 	}
1090 
1091 	return active_streams;
1092 }
1093 
1094 static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
1095 {
1096 	int ret;
1097 
1098 	/*
1099 	 * Do not assume a certain timing between the prior
1100 	 * suspend flow, and running of this quirk function.
1101 	 * This is needed if the controller was just put
1102 	 * to reset before calling this function.
1103 	 */
1104 	usleep_range(500, 1000);
1105 
1106 	/*
1107 	 * Take controller out of reset to flush DMA
1108 	 * transactions.
1109 	 */
1110 	ret = hda_dsp_ctrl_link_reset(sdev, false);
1111 	if (ret < 0)
1112 		return ret;
1113 
1114 	usleep_range(500, 1000);
1115 
1116 	/* Restore state for shutdown, back to reset */
1117 	ret = hda_dsp_ctrl_link_reset(sdev, true);
1118 	if (ret < 0)
1119 		return ret;
1120 
1121 	return ret;
1122 }
1123 
1124 int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
1125 {
1126 	unsigned int active_streams;
1127 	int ret, ret2;
1128 
1129 	/* check if DMA cleanup has been successful */
1130 	active_streams = hda_dsp_check_for_dma_streams(sdev);
1131 
1132 	sdev->system_suspend_target = SOF_SUSPEND_S3;
1133 	ret = snd_sof_suspend(sdev->dev);
1134 
1135 	if (active_streams) {
1136 		dev_warn(sdev->dev,
1137 			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
1138 			 active_streams);
1139 		ret2 = hda_dsp_s5_quirk(sdev);
1140 		if (ret2 < 0)
1141 			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
1142 	}
1143 
1144 	return ret;
1145 }
1146 EXPORT_SYMBOL_NS(hda_dsp_shutdown_dma_flush, "SND_SOC_SOF_INTEL_HDA_COMMON");
1147 
/* Shut the DSP down by going through the regular S3 suspend flow. */
int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
EXPORT_SYMBOL_NS(hda_dsp_shutdown, "SND_SOC_SOF_INTEL_HDA_COMMON");
1154 
1155 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
1156 {
1157 	int ret;
1158 
1159 	/* make sure all DAI resources are freed */
1160 	ret = hda_dsp_dais_suspend(sdev);
1161 	if (ret < 0)
1162 		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1163 
1164 	return ret;
1165 }
1166 EXPORT_SYMBOL_NS(hda_dsp_set_hw_params_upon_resume, "SND_SOC_SOF_INTEL_HDA_COMMON");
1167 
1168 void hda_dsp_d0i3_work(struct work_struct *work)
1169 {
1170 	struct sof_intel_hda_dev *hdev = container_of(work,
1171 						      struct sof_intel_hda_dev,
1172 						      d0i3_work.work);
1173 	struct hdac_bus *bus = &hdev->hbus.core;
1174 	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1175 	struct sof_dsp_power_state target_state = {
1176 		.state = SOF_DSP_PM_D0,
1177 		.substate = SOF_HDA_DSP_PM_D0I3,
1178 	};
1179 	int ret;
1180 
1181 	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
1182 	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1183 		/* remain in D0I0 */
1184 		return;
1185 
1186 	/* This can fail but error cannot be propagated */
1187 	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1188 	if (ret < 0)
1189 		dev_err_ratelimited(sdev->dev,
1190 				    "error: failed to set DSP state %d substate %d\n",
1191 				    target_state.state, target_state.substate);
1192 }
1193 EXPORT_SYMBOL_NS(hda_dsp_d0i3_work, "SND_SOC_SOF_INTEL_HDA_COMMON");
1194 
1195 int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1196 {
1197 	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1198 	int ret, ret1;
1199 
1200 	/* power up core */
1201 	ret = hda_dsp_enable_core(sdev, BIT(core));
1202 	if (ret < 0) {
1203 		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1204 			core, ret);
1205 		return ret;
1206 	}
1207 
1208 	/* No need to send IPC for primary core or if FW boot is not complete */
1209 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1210 		return 0;
1211 
1212 	/* No need to continue the set_core_state ops is not available */
1213 	if (!pm_ops->set_core_state)
1214 		return 0;
1215 
1216 	/* Now notify DSP for secondary cores */
1217 	ret = pm_ops->set_core_state(sdev, core, true);
1218 	if (ret < 0) {
1219 		dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
1220 			core, ret);
1221 		goto power_down;
1222 	}
1223 
1224 	return ret;
1225 
1226 power_down:
1227 	/* power down core if it is host managed and return the original error if this fails too */
1228 	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1229 	if (ret1 < 0)
1230 		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1231 
1232 	return ret;
1233 }
1234 EXPORT_SYMBOL_NS(hda_dsp_core_get, "SND_SOC_SOF_INTEL_HDA_COMMON");
1235 
1236 #if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
1237 void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
1238 {
1239 	struct sof_intel_hda_dev *hdev;
1240 
1241 	hdev = sdev->pdata->hw_pdata;
1242 
1243 	if (!hdev->sdw)
1244 		return;
1245 
1246 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
1247 				HDA_DSP_REG_ADSPIC2_SNDW,
1248 				enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
1249 }
1250 EXPORT_SYMBOL_NS(hda_common_enable_sdw_irq, "SND_SOC_SOF_INTEL_HDA_COMMON");
1251 
1252 void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
1253 {
1254 	u32 interface_mask = hda_get_interface_mask(sdev);
1255 	const struct sof_intel_dsp_desc *chip;
1256 
1257 	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
1258 		return;
1259 
1260 	chip = get_chip_info(sdev->pdata);
1261 	if (chip && chip->enable_sdw_irq)
1262 		chip->enable_sdw_irq(sdev, enable);
1263 }
1264 EXPORT_SYMBOL_NS(hda_sdw_int_enable, "SND_SOC_SOF_INTEL_HDA_COMMON");
1265 
1266 int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
1267 {
1268 	struct sof_intel_hda_dev *hdev;
1269 	struct sdw_intel_ctx *ctx;
1270 	u32 caps;
1271 
1272 	hdev = sdev->pdata->hw_pdata;
1273 	ctx = hdev->sdw;
1274 
1275 	caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
1276 	caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
1277 
1278 	/* Check HW supported vs property value */
1279 	if (caps < ctx->count) {
1280 		dev_err(sdev->dev,
1281 			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
1282 			__func__, ctx->count, caps);
1283 		return -EINVAL;
1284 	}
1285 
1286 	return 0;
1287 }
1288 EXPORT_SYMBOL_NS(hda_sdw_check_lcount_common, "SND_SOC_SOF_INTEL_HDA_COMMON");
1289 
1290 int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
1291 {
1292 	struct sof_intel_hda_dev *hdev;
1293 	struct sdw_intel_ctx *ctx;
1294 	struct hdac_bus *bus;
1295 	u32 slcount;
1296 
1297 	bus = sof_to_bus(sdev);
1298 
1299 	hdev = sdev->pdata->hw_pdata;
1300 	ctx = hdev->sdw;
1301 
1302 	slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
1303 
1304 	/* Check HW supported vs property value */
1305 	if (slcount < ctx->count) {
1306 		dev_err(sdev->dev,
1307 			"%s: BIOS master count %d is larger than hardware capabilities %d\n",
1308 			__func__, ctx->count, slcount);
1309 		return -EINVAL;
1310 	}
1311 
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL_NS(hda_sdw_check_lcount_ext, "SND_SOC_SOF_INTEL_HDA_COMMON");
1315 
1316 int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
1317 {
1318 	const struct sof_intel_dsp_desc *chip;
1319 
1320 	chip = get_chip_info(sdev->pdata);
1321 	if (chip && chip->read_sdw_lcount)
1322 		return chip->read_sdw_lcount(sdev);
1323 
1324 	return 0;
1325 }
1326 EXPORT_SYMBOL_NS(hda_sdw_check_lcount, "SND_SOC_SOF_INTEL_HDA_COMMON");
1327 
1328 void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
1329 {
1330 	u32 interface_mask = hda_get_interface_mask(sdev);
1331 	const struct sof_intel_dsp_desc *chip;
1332 
1333 	if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
1334 		return;
1335 
1336 	chip = get_chip_info(sdev->pdata);
1337 	if (chip && chip->sdw_process_wakeen)
1338 		chip->sdw_process_wakeen(sdev);
1339 }
1340 EXPORT_SYMBOL_NS(hda_sdw_process_wakeen, "SND_SOC_SOF_INTEL_HDA_COMMON");
1341 
1342 #endif
1343 
/* Mask the SoundWire and IPC interrupt sources (e.g. ahead of suspend). */
int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_disable_interrupts, "SND_SOC_SOF_INTEL_HDA_COMMON");
1352 
/* ROM/FW boot error codes mapped to human-readable messages */
static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
	{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
	{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
	{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
	{HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
	{HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
	{HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
	{HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
	{HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
	{HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
	{HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
	{HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
	{HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
	{HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
	{HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
	{HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
	{HDA_DSP_ROM_NULL_FW_ENTRY,	"error: null FW entry point"},
};
1371 
/* expands to a {code, "NAME"} pair for an FSR ROM state */
#define FSR_ROM_STATE_ENTRY(state)	{FSR_STATE_ROM_##state, #state}
/* ROM boot state names used on pre-ACE (cAVS) platforms */
static const struct hda_dsp_msg_code cavs_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
	FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
	/* CSE states */
	FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
	FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
	FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
	FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
	FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
	FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
	FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
};
1395 
/* ROM boot state names used on ACE platforms (MTL and later) */
static const struct hda_dsp_msg_code ace_fsr_rom_state_names[] = {
	FSR_ROM_STATE_ENTRY(INIT),
	FSR_ROM_STATE_ENTRY(INIT_DONE),
	FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
	FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
	FSR_ROM_STATE_ENTRY(FW_ENTERED),
	FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
	FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
	FSR_ROM_STATE_ENTRY(RESET_VECTOR_DONE),
	FSR_ROM_STATE_ENTRY(PURGE_BOOT),
	FSR_ROM_STATE_ENTRY(RESTORE_BOOT),
	FSR_ROM_STATE_ENTRY(FW_ENTRY_POINT),
	FSR_ROM_STATE_ENTRY(VALIDATE_PUB_KEY),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_HPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_DOWN_ULPSRAM),
	FSR_ROM_STATE_ENTRY(POWER_UP_ULPSRAM_STACK),
	FSR_ROM_STATE_ENTRY(POWER_UP_HPSRAM_DMA),
	FSR_ROM_STATE_ENTRY(BEFORE_EP_POINTER_READ),
	FSR_ROM_STATE_ENTRY(VALIDATE_MANIFEST),
	FSR_ROM_STATE_ENTRY(VALIDATE_FW_MODULE),
	FSR_ROM_STATE_ENTRY(PROTECT_IMR_REGION),
	FSR_ROM_STATE_ENTRY(PUSH_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(PULL_MODEL_ROUTINE),
	FSR_ROM_STATE_ENTRY(VALIDATE_PKG_DIR),
	FSR_ROM_STATE_ENTRY(VALIDATE_CPD),
	FSR_ROM_STATE_ENTRY(VALIDATE_CSS_MAN_HEADER),
	FSR_ROM_STATE_ENTRY(VALIDATE_BLOB_SVN),
	FSR_ROM_STATE_ENTRY(VERIFY_IFWI_PARTITION),
	FSR_ROM_STATE_ENTRY(REMOVE_ACCESS_CONTROL),
	FSR_ROM_STATE_ENTRY(AUTH_BYPASS),
	FSR_ROM_STATE_ENTRY(AUTH_ENABLED),
	FSR_ROM_STATE_ENTRY(INIT_DMA),
	FSR_ROM_STATE_ENTRY(PURGE_FW_ENTRY),
	FSR_ROM_STATE_ENTRY(PURGE_FW_END),
	FSR_ROM_STATE_ENTRY(CLEAN_UP_BSS_DONE),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_ENTRY),
	FSR_ROM_STATE_ENTRY(IMR_RESTORE_END),
	FSR_ROM_STATE_ENTRY(FW_MANIFEST_IN_DMA_BUFF),
	FSR_ROM_STATE_ENTRY(LOAD_CSE_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_MAN_TO_IMR),
	FSR_ROM_STATE_ENTRY(LOAD_FW_CODE_TO_IMR),
	FSR_ROM_STATE_ENTRY(FW_LOADING_DONE),
	FSR_ROM_STATE_ENTRY(FW_CODE_LOADED),
	FSR_ROM_STATE_ENTRY(VERIFY_IMAGE_TYPE),
	FSR_ROM_STATE_ENTRY(AUTH_API_INIT),
	FSR_ROM_STATE_ENTRY(AUTH_API_PROC),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_BUSY),
	FSR_ROM_STATE_ENTRY(AUTH_API_FIRST_RESULT),
	FSR_ROM_STATE_ENTRY(AUTH_API_CLEANUP),
};
1447 
/* expands to a {code, "NAME"} pair for an FSR bring-up state */
#define FSR_BRINGUP_STATE_ENTRY(state)	{FSR_STATE_BRINGUP_##state, #state}
/* state names reported while the bring-up (BRNGUP) module is running */
static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
	FSR_BRINGUP_STATE_ENTRY(INIT),
	FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
	FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
	FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
	FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
	FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
};
1457 
/* expands to a {code, "NAME"} pair for an FSR wait-state */
#define FSR_WAIT_STATE_ENTRY(state)	{FSR_WAIT_FOR_##state, #state}
/* names for the "waiting for" field of the firmware status register */
static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
	FSR_WAIT_STATE_ENTRY(IPC_BUSY),
	FSR_WAIT_STATE_ENTRY(IPC_DONE),
	FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
	FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
	FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
	FSR_WAIT_STATE_ENTRY(CSE_CSR),
};
1467 
/* designated-initializer entry: index by FSR module code, value is its name */
#define FSR_MODULE_NAME_ENTRY(mod)	[FSR_MOD_##mod] = #mod
/* names for the module field of the firmware status register */
static const char * const fsr_module_names[] = {
	FSR_MODULE_NAME_ENTRY(ROM),
	FSR_MODULE_NAME_ENTRY(ROM_BYP),
	FSR_MODULE_NAME_ENTRY(BASE_FW),
	FSR_MODULE_NAME_ENTRY(LP_BOOT),
	FSR_MODULE_NAME_ENTRY(BRNGUP),
	FSR_MODULE_NAME_ENTRY(ROM_EXT),
};
1477 
1478 static const char *
1479 hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
1480 		       size_t array_size)
1481 {
1482 	int i;
1483 
1484 	for (i = 0; i < array_size; i++) {
1485 		if (code == msg_code[i].code)
1486 			return msg_code[i].text;
1487 	}
1488 
1489 	return NULL;
1490 }
1491 
/*
 * Decode and log the firmware status register (FSR): module, state,
 * optional wait-state and halted flag, followed by the error/status
 * code read from the dword after the status register.
 */
void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
{
	const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
	const char *state_text, *error_text, *module_text;
	u32 fsr, state, wait_state, module, error_code;

	fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
	state = FSR_TO_STATE_CODE(fsr);
	wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
	module = FSR_TO_MODULE_CODE(fsr);

	/* fsr_module_names is indexed by module code up to FSR_MOD_ROM_EXT */
	if (module > FSR_MOD_ROM_EXT)
		module_text = "unknown";
	else
		module_text = fsr_module_names[module];

	/* the state namespace depends on which module reported it */
	if (module == FSR_MOD_BRNGUP) {
		state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
						    ARRAY_SIZE(fsr_bringup_state_names));
	} else {
		if (chip->hw_ip_version < SOF_INTEL_ACE_1_0)
			state_text = hda_dsp_get_state_text(state,
							cavs_fsr_rom_state_names,
							ARRAY_SIZE(cavs_fsr_rom_state_names));
		else
			state_text = hda_dsp_get_state_text(state,
							ace_fsr_rom_state_names,
							ARRAY_SIZE(ace_fsr_rom_state_names));
	}

	/* not for us, must be generic sof message */
	if (!state_text) {
		dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
		return;
	}

	if (wait_state) {
		const char *wait_state_text;

		wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
							 ARRAY_SIZE(fsr_wait_state_names));
		if (!wait_state_text)
			wait_state_text = "unknown";

		dev_printk(level, sdev->dev,
			   "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
			   fsr, module_text, state_text, wait_state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	} else {
		dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
			   fsr, module_text, state_text,
			   fsr & FSR_HALTED ? "not running" : "running");
	}

	/* the dword following the status register carries the error code */
	error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
	if (!error_code)
		return;

	error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
					    ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
	if (!error_text)
		error_text = "unknown";

	/* once the FW has entered, the code is a status rather than an error */
	if (state == FSR_STATE_FW_ENTERED)
		dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
			   error_text);
	else
		dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
			   error_text);
}
EXPORT_SYMBOL_NS(hda_dsp_get_state, "SND_SOC_SOF_INTEL_HDA_COMMON");
1563 
/*
 * Read the firmware oops registers, panic info and stack words from
 * the DSP debug window starting at sdev->dsp_oops_offset. The three
 * regions are laid out back-to-back: xtensa oops header/registers,
 * then panic info, then the stack dump.
 */
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
				  struct sof_ipc_dsp_oops_xtensa *xoops,
				  struct sof_ipc_panic_info *panic_info,
				  u32 *stack, size_t stack_words)
{
	u32 offset = sdev->dsp_oops_offset;

	/* first read registers */
	sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));

	/* note: variable AR register array is not read */

	/* then get panic info */
	/* sanity-check the header before trusting totalsize as an offset */
	if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
		dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
			xoops->arch_hdr.totalsize);
		return;
	}
	offset += xoops->arch_hdr.totalsize;
	sof_block_read(sdev, sdev->mmio_bar, offset,
		       panic_info, sizeof(*panic_info));

	/* then get the stack */
	offset += sizeof(*panic_info);
	sof_block_read(sdev, sdev->mmio_bar, offset, stack,
		       stack_words * sizeof(u32));
}
1591 
1592 /* dump the first 8 dwords representing the extended ROM status */
1593 void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
1594 				 u32 flags)
1595 {
1596 	const struct sof_intel_dsp_desc *chip;
1597 	char msg[128];
1598 	int len = 0;
1599 	u32 value;
1600 	int i;
1601 
1602 	chip = get_chip_info(sdev->pdata);
1603 	for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
1604 		value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
1605 		len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
1606 	}
1607 
1608 	dev_printk(level, sdev->dev, "extended rom status: %s", msg);
1609 
1610 }
1611 
1612 void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
1613 {
1614 	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
1615 	struct sof_ipc_dsp_oops_xtensa xoops;
1616 	struct sof_ipc_panic_info panic_info;
1617 	u32 stack[HDA_DSP_STACK_DUMP_SIZE];
1618 
1619 	/* print ROM/FW status */
1620 	hda_dsp_get_state(sdev, level);
1621 
1622 	/* The firmware register dump only available with IPC3 */
1623 	if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC_TYPE_3) {
1624 		u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
1625 		u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
1626 
1627 		hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
1628 				      HDA_DSP_STACK_DUMP_SIZE);
1629 		sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
1630 					 &panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
1631 	} else {
1632 		hda_dsp_dump_ext_rom_status(sdev, level, flags);
1633 	}
1634 }
1635 EXPORT_SYMBOL_NS(hda_dsp_dump, "SND_SOC_SOF_INTEL_HDA_COMMON");
1636