// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */

#include <sound/hda_register.h>
#include <sound/sof/ipc4/header.h>
#include <trace/events/sof_intel.h>
#include "../ops.h"
#include "hda.h"
#include "telemetry.h"

EXPORT_TRACEPOINT_SYMBOL(sof_intel_ipc_firmware_initiated);
EXPORT_TRACEPOINT_SYMBOL(sof_intel_ipc_firmware_response);
EXPORT_TRACEPOINT_SYMBOL(sof_intel_hda_irq_ipc_check);

static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
{
	/*
	 * tell DSP cmd is done - clear busy
	 * interrupt and send reply msg to dsp
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       HDA_DSP_REG_HIPCT,
				       HDA_DSP_REG_HIPCT_BUSY,
				       HDA_DSP_REG_HIPCT_BUSY);

	/* unmask BUSY interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
				HDA_DSP_REG_HIPCCTL,
				HDA_DSP_REG_HIPCCTL_BUSY,
				HDA_DSP_REG_HIPCCTL_BUSY);
}

static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
{
	/*
	 * set DONE bit - tell the DSP we have received and processed its
	 * reply, so it should not send any more replies for this message
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       HDA_DSP_REG_HIPCIE,
				       HDA_DSP_REG_HIPCIE_DONE,
				       HDA_DSP_REG_HIPCIE_DONE);

	/* unmask Done interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
				HDA_DSP_REG_HIPCCTL,
				HDA_DSP_REG_HIPCCTL_DONE,
				HDA_DSP_REG_HIPCCTL_DONE);
}

int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	/* send IPC message to DSP */
	sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
			  msg->msg_size);
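	/*
	 * setting the BUSY bit in HIPCI signals the DSP that a new
	 * message is waiting in the host mailbox
	 */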
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
			  HDA_DSP_REG_HIPCI_BUSY);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_send_msg, SND_SOC_SOF_INTEL_HDA_COMMON);

static inline bool hda_dsp_ipc4_pm_msg(u32 primary)
{
	/* pm setting is only supported by module msg */
	if (SOF_IPC4_MSG_IS_MODULE_MSG(primary) != SOF_IPC4_MODULE_MSG)
		return false;

	if (SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_DX ||
	    SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_D0IX)
		return true;

	return false;
}

void hda_dsp_ipc4_schedule_d0i3_work(struct sof_intel_hda_dev *hdev,
				     struct snd_sof_ipc_msg *msg)
{
	struct sof_ipc4_msg *msg_data = msg->msg_data;

	/* Schedule delayed work for D0i3 entry only after sending a non-PM IPC message */
	if (hda_dsp_ipc4_pm_msg(msg_data->primary))
		return;

	mod_delayed_work(system_wq, &hdev->d0i3_work,
			 msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_schedule_d0i3_work, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
	struct sof_ipc4_msg *msg_data = msg->msg_data;

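	/*
	 * If the DSP has not yet consumed the previous message, defer this
	 * one: it is stashed in delayed_ipc_tx_msg and re-sent from the IRQ
	 * thread once the DONE (ack) interrupt for the pending message fires.
	 */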
	if (hda_ipc4_tx_is_busy(sdev)) {
		hdev->delayed_ipc_tx_msg = msg;
		return 0;
	}

	hdev->delayed_ipc_tx_msg = NULL;

	/* send the message via mailbox */
	if (msg_data->data_size)
		sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
				  msg_data->data_size);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE, msg_data->extension);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
			  msg_data->primary | HDA_DSP_REG_HIPCI_BUSY);

	hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_send_msg, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
{
	struct snd_sof_ipc_msg *msg = sdev->msg;
	struct sof_ipc_reply reply;
	struct sof_ipc_cmd_hdr *hdr;

	/*
	 * Sometimes an unexpected reply IPC arrives that does not correspond
	 * to any IPC sent by the driver. In that case the driver must
	 * ignore it.
	 */
	if (!msg) {
		dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
		return;
	}

	hdr = msg->msg_data;
	if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
	    hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
		/*
		 * memory windows are powered off before sending IPC reply,
		 * so we can't read the mailbox for CTX_SAVE and PM_GATE
		 * replies.
		 */
		reply.error = 0;
		reply.hdr.cmd = SOF_IPC_GLB_REPLY;
		reply.hdr.size = sizeof(reply);
		memcpy(msg->reply_data, &reply, sizeof(reply));

		msg->reply_error = 0;
	} else {
		snd_sof_ipc_get_reply(sdev);
	}
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_get_reply, SND_SOC_SOF_INTEL_HDA_COMMON);

irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
{
	struct sof_ipc4_msg notification_data = {{ 0 }};
	struct snd_sof_dev *sdev = context;
	bool ack_received = false;
	bool ipc_irq = false;
	u32 hipcie, hipct;

	hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
	hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);

	if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
		/* DSP received the message */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
					HDA_DSP_REG_HIPCCTL_DONE, 0);
		hda_dsp_ipc_dsp_done(sdev);

		ipc_irq = true;
		ack_received = true;
	}

	if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
		/* Message from DSP (reply or notification) */
		u32 hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
					      HDA_DSP_REG_HIPCTE);
		u32 primary = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
		u32 extension = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;

		/* mask BUSY interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
					HDA_DSP_REG_HIPCCTL_BUSY, 0);

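		/*
		 * the direction bit in the primary word tells a reply to a
		 * host request apart from a firmware-initiated notification
		 */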
		if (primary & SOF_IPC4_MSG_DIR_MASK) {
			/* Reply received */
			if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
				struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;

				data->primary = primary;
				data->extension = extension;

				spin_lock_irq(&sdev->ipc_lock);

				snd_sof_ipc_get_reply(sdev);
				hda_dsp_ipc_host_done(sdev);
				snd_sof_ipc_reply(sdev, data->primary);

				spin_unlock_irq(&sdev->ipc_lock);
			} else {
				dev_dbg_ratelimited(sdev->dev,
						    "IPC reply before FW_READY: %#x|%#x\n",
						    primary, extension);
			}
		} else {
			/* Notification received */

			notification_data.primary = primary;
			notification_data.extension = extension;
			sdev->ipc->msg.rx_data = &notification_data;
			snd_sof_ipc_msgs_rx(sdev);
			sdev->ipc->msg.rx_data = NULL;

			/* Let DSP know that we have finished processing the message */
			hda_dsp_ipc_host_done(sdev);
		}

		ipc_irq = true;
	}

	if (!ipc_irq)
		/* This interrupt is not shared so no need to return IRQ_NONE. */
		dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");

	if (ack_received) {
		struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;

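		/* send the message that was deferred while the DSP was still busy */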
		if (hdev->delayed_ipc_tx_msg)
			hda_dsp_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_irq_thread, SND_SOC_SOF_INTEL_HDA_COMMON);

/* IPC handler thread */
irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 hipci;
	u32 hipcie;
	u32 hipct;
	u32 hipcte;
	u32 msg;
	u32 msg_ext;
	bool ipc_irq = false;

	/* read IPC status */
	hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_HIPCIE);
	hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
	hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
	hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);

	/* is this a reply message from the DSP */
	if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
		msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
		msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;

		trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);

		/* mask Done interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					HDA_DSP_REG_HIPCCTL,
					HDA_DSP_REG_HIPCCTL_DONE, 0);

		/*
		 * Make sure the interrupt thread cannot be preempted between
		 * waking up the sender and re-enabling the interrupt. Also
		 * protect against a theoretical race with sof_ipc_tx_message():
		 * if the DSP is fast enough to receive an IPC message, reply to
		 * it, and the host interrupt processing calls this function on
		 * a different core from the one where the message is being
		 * sent, the message might not yet be marked as expecting a
		 * reply.
		 */
		if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
			spin_lock_irq(&sdev->ipc_lock);

			/* handle immediate reply from DSP core */
			hda_dsp_ipc_get_reply(sdev);
			snd_sof_ipc_reply(sdev, msg);

			/* set the done bit */
			hda_dsp_ipc_dsp_done(sdev);

			spin_unlock_irq(&sdev->ipc_lock);
		} else {
			dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
					    msg);
		}

		ipc_irq = true;
	}

	/* is this a new message from DSP */
	if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
		msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
		msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;

		trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);

		/* mask BUSY interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					HDA_DSP_REG_HIPCCTL,
					HDA_DSP_REG_HIPCCTL_BUSY, 0);

		/* handle messages from DSP */
		if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
			struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
			bool non_recoverable = true;

			/*
			 * This is a PANIC message!
			 *
			 * If it arrives during firmware boot and this is not
			 * the last boot attempt, set non_recoverable to false
			 * as the DSP might still boot successfully in a later
			 * iteration
			 */
			if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
			    hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
				non_recoverable = false;

			snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
					  non_recoverable);
		} else {
			/* normal message - process normally */
			snd_sof_ipc_msgs_rx(sdev);
		}

		hda_dsp_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq) {
		/*
		 * This interrupt is not shared so no need to return IRQ_NONE.
		 */
		dev_dbg_ratelimited(sdev->dev,
				    "nothing to do in IPC IRQ thread\n");
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_irq_thread, SND_SOC_SOF_INTEL_HDA_COMMON);

/* Check if an IPC IRQ occurred */
bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	bool ret = false;
	u32 irq_status;

	if (sdev->dspless_mode_selected)
		return false;

	/* store status */
	irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
	trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);

	/* invalid register read (all ones) ? */
	if (irq_status == 0xffffffff)
		goto out;

	/* IPC message ? */
	if (irq_status & HDA_DSP_ADSPIS_IPC)
		ret = true;

	/* CLDMA message ? */
	if (irq_status & HDA_DSP_ADSPIS_CL_DMA) {
		hda->code_loading = 0;
		wake_up(&hda->waitq);
		ret = false;
	}

out:
	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_check_ipc_irq, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	return HDA_DSP_MBOX_UPLINK_OFFSET;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_get_mailbox_offset, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
{
	return SRAM_WINDOW_OFFSET(id);
}
EXPORT_SYMBOL_NS(hda_dsp_ipc_get_window_offset, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_ipc_msg_data(struct snd_sof_dev *sdev,
		     struct snd_sof_pcm_stream *sps,
		     void *p, size_t sz)
{
	if (!sps || !sdev->stream_box.size) {
		sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
	} else {
		struct snd_pcm_substream *substream = sps->substream;
		struct hdac_stream *hstream = substream->runtime->private_data;
		struct sof_intel_hda_stream *hda_stream;

		/* The stream might already be closed */
		if (!hstream)
			return -ESTRPIPE;

		hda_stream = container_of(hstream,
					  struct sof_intel_hda_stream,
					  hext_stream.hstream);

		sof_mailbox_read(sdev, hda_stream->sof_intel_stream.posn_offset, p, sz);
	}

	return 0;
}
EXPORT_SYMBOL_NS(hda_ipc_msg_data, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_sof_pcm_stream *sps,
			       size_t posn_offset)
{
	struct snd_pcm_substream *substream = sps->substream;
	struct hdac_stream *hstream = substream->runtime->private_data;
	struct sof_intel_hda_stream *hda_stream;

	hda_stream = container_of(hstream, struct sof_intel_hda_stream,
				  hext_stream.hstream);

	/* check for unaligned offset or overflow */
	if (posn_offset > sdev->stream_box.size ||
	    posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
		return -EINVAL;

	hda_stream->sof_intel_stream.posn_offset = sdev->stream_box.offset + posn_offset;

	dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
		substream->stream, hda_stream->sof_intel_stream.posn_offset);

	return 0;
}
EXPORT_SYMBOL_NS(hda_set_stream_data_offset, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_ipc4_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;

	/* print ROM/FW status */
	hda_dsp_get_state(sdev, level);

	if (flags & SOF_DBG_DUMP_REGS)
		sof_ipc4_intel_dump_telemetry_state(sdev, flags);
	else
		hda_dsp_dump_ext_rom_status(sdev, level, flags);
}
EXPORT_SYMBOL_NS(hda_ipc4_dsp_dump, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_check_ipc_irq(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip;

	chip = get_chip_info(sdev->pdata);
	if (chip && chip->check_ipc_irq)
		return chip->check_ipc_irq(sdev);

	return false;
}
EXPORT_SYMBOL_NS(hda_check_ipc_irq, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_ipc_irq_dump(struct snd_sof_dev *sdev)
{
	u32 adspis;
	u32 intsts;
	u32 intctl;
	u32 ppsts;
	u8 rirbsts;

	/* read key IRQ stats and config registers */
	adspis = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
	intsts = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
	intctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL);
	ppsts = snd_sof_dsp_read(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPSTS);
	rirbsts = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, AZX_REG_RIRBSTS);

	dev_err(sdev->dev, "hda irq intsts 0x%8.8x intctl 0x%8.8x rirb %2.2x\n",
		intsts, intctl, rirbsts);
	dev_err(sdev->dev, "dsp irq ppsts 0x%8.8x adspis 0x%8.8x\n", ppsts, adspis);
}
EXPORT_SYMBOL_NS(hda_ipc_irq_dump, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_ipc_dump(struct snd_sof_dev *sdev)
{
	u32 hipcie;
	u32 hipct;
	u32 hipcctl;

	hda_ipc_irq_dump(sdev);

	/* read IPC status */
	hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
	hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);

	/* dump the IPC regs */
	/* TODO: parse the raw msg */
	dev_err(sdev->dev, "host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
		hipcie, hipct, hipcctl);
}
EXPORT_SYMBOL_NS(hda_ipc_dump, SND_SOC_SOF_INTEL_HDA_COMMON);

void hda_ipc4_dump(struct snd_sof_dev *sdev)
{
	u32 hipci, hipcie, hipct, hipcte, hipcctl;

	hda_ipc_irq_dump(sdev);

	hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
	hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
	hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
	hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);

	/* dump the IPC regs */
	/* TODO: parse the raw msg */
	dev_err(sdev->dev, "Host IPC initiator: %#x|%#x, target: %#x|%#x, ctl: %#x\n",
		hipci, hipcie, hipct, hipcte, hipcctl);
}
EXPORT_SYMBOL_NS(hda_ipc4_dump, SND_SOC_SOF_INTEL_HDA_COMMON);

bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	u32 val;

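	/*
	 * the BUSY bit in the IPC request register stays set until the
	 * DSP has consumed the previously sent message
	 */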
	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->ipc_req);

	return !!(val & chip->ipc_req_mask);
}
EXPORT_SYMBOL_NS(hda_ipc4_tx_is_busy, SND_SOC_SOF_INTEL_HDA_COMMON);
557