xref: /linux/sound/soc/sof/ipc3-dtrace.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2022 Intel Corporation
4 //
5 // Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
6 
7 #include <linux/debugfs.h>
8 #include <linux/sched/signal.h>
9 #include "sof-priv.h"
10 #include "sof-audio.h"
11 #include "ops.h"
12 #include "sof-utils.h"
13 #include "ipc3-priv.h"
14 
15 #define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
16 #define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024
17 
18 enum sof_dtrace_state {
19 	SOF_DTRACE_DISABLED,
20 	SOF_DTRACE_STOPPED,
21 	SOF_DTRACE_INITIALIZING,
22 	SOF_DTRACE_ENABLED,
23 };
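
/*
 * Rough summary of how the states above are used in this file:
 *  - DISABLED -> INITIALIZING: ipc3_dtrace_enable() right before the DMA
 *    trace params IPC is sent to the DSP
 *  - INITIALIZING -> ENABLED: once the params IPC succeeded and the host
 *    side is triggered with SNDRV_PCM_TRIGGER_START
 *  - ENABLED -> STOPPED: ipc3_dtrace_release() after SNDRV_PCM_TRIGGER_STOP
 *  - STOPPED -> DISABLED: ipc3_dtrace_release() when the host side is fully
 *    released (only_stop == false)
 *  - STOPPED -> ENABLED: ipc3_dtrace_enable() restarts the stream without
 *    re-sending the params IPC
 *  - any state -> DISABLED: a failure while enabling
 */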
24 
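/*
 * Roles of the sof_dtrace_priv fields as used below:
 * @dmatb: DMA buffer that receives the trace data from the DSP
 * @dmatp: DMA buffer holding the page table that describes @dmatb
 * @dma_trace_pages: number of pages in the compressed page table
 * @trace_sleep: readers wait here until the DSP reports new data
 * @host_offset: last write position reported by the DSP
 * @dtrace_error: set when the firmware crashes while tracing is enabled
 * @dtrace_draining: remaining buffered data is being drained after tracing
 *                   has stopped
 * @dtrace_state: current state, see enum sof_dtrace_state
 */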
25 struct sof_dtrace_priv {
26 	struct snd_dma_buffer dmatb;
27 	struct snd_dma_buffer dmatp;
28 	int dma_trace_pages;
29 	wait_queue_head_t trace_sleep;
30 	u32 host_offset;
31 	bool dtrace_error;
32 	bool dtrace_draining;
33 	enum sof_dtrace_state dtrace_state;
34 };
35 
36 static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
37 {
38 	if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
39 	    priv->dtrace_state == SOF_DTRACE_INITIALIZING)
40 		return true;
41 
42 	return false;
43 }
44 
45 static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
46 				    struct sof_ipc_trace_filter_elem *elem_list,
47 				    int capacity, int *counter)
48 {
49 	if (*counter >= capacity)
50 		return -ENOMEM;
51 
52 	elem_list[*counter].key = key;
53 	elem_list[*counter].value = value;
54 	++*counter;
55 
56 	return 0;
57 }
58 
59 static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
60 				    struct sof_ipc_trace_filter_elem *elem,
61 				    int capacity, int *counter)
62 {
63 	int log_level, pipe_id, comp_id, read, ret;
64 	int len = strlen(line);
65 	int cnt = *counter;
66 	u32 uuid_id;
67 
68 	/* ignore empty content */
69 	ret = sscanf(line, " %n", &read);
70 	if (!ret && read == len)
71 		return len;
72 
73 	ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
74 	if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
75 		dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
76 		return -EINVAL;
77 	}
78 
79 	if (uuid_id > 0) {
80 		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
81 					       uuid_id, elem, capacity, &cnt);
82 		if (ret)
83 			return ret;
84 	}
85 	if (pipe_id >= 0) {
86 		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
87 					       pipe_id, elem, capacity, &cnt);
88 		if (ret)
89 			return ret;
90 	}
91 	if (comp_id >= 0) {
92 		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
93 					       comp_id, elem, capacity, &cnt);
94 		if (ret)
95 			return ret;
96 	}
97 
98 	ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
99 				       SOF_IPC_TRACE_FILTER_ELEM_FIN,
100 				       log_level, elem, capacity, &cnt);
101 	if (ret)
102 		return ret;
103 
104 	/* update the counter only when the whole entry was parsed successfully */
105 	*counter = cnt;
106 
107 	return len;
108 }
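
/*
 * Illustration of the entry format parsed above (values are made up): each
 * ';'-separated entry is "<log_level> <uuid in hex> <pipeline id> <comp id>",
 * where uuid 0 and a negative pipeline/component id mean "do not filter on
 * that key". For example:
 *
 *   "4 0 -1 -1"  - log level 4 for every component
 *   "4 0 2 -1"   - log level 4 for all components in pipeline 2
 *   "3 0 -1 65"  - log level 3 for the component with id 65
 *
 * Each entry always emits the final SET_LEVEL | FIN element, plus one
 * element per key that is actually set.
 */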
109 
110 static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
111 			      int *out_elem_cnt,
112 			      struct sof_ipc_trace_filter_elem **out)
113 {
114 	static const char entry_delimiter[] = ";";
115 	char *entry = string;
116 	int capacity = 0;
117 	int entry_len;
118 	int cnt = 0;
119 
120 	/*
121 	 * Each entry contains at least one and at most TRACE_FILTER_ELEMENTS_PER_ENTRY
122 	 * IPC elements, depending on its content. Calculate the IPC element
123 	 * capacity for the input string assuming every element is set in each entry.
124 	 */
125 	while (entry) {
126 		capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
127 		entry = strchr(entry + 1, entry_delimiter[0]);
128 	}
129 	*out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
130 	if (!*out)
131 		return -ENOMEM;
132 
133 	/* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
134 	while ((entry = strsep(&string, entry_delimiter))) {
135 		entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
136 		if (entry_len < 0) {
137 			dev_err(sdev->dev,
138 				"Parsing filter entry '%s' failed with %d\n",
139 				entry, entry_len);
140 			return -EINVAL;
141 		}
142 	}
143 
144 	*out_elem_cnt = cnt;
145 
146 	return 0;
147 }
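
/*
 * For a hypothetical input "4 0 2 -1; 3 0 -1 65" the capacity loop above
 * counts two entries and reserves 2 * TRACE_FILTER_ELEMENTS_PER_ENTRY = 8
 * elements, while trace_filter_parse_entry() ends up using only 4 of them
 * (one key element plus the SET_LEVEL element per entry).
 */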
148 
149 static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
150 				    struct sof_ipc_trace_filter_elem *elems)
151 {
152 	struct sof_ipc_trace_filter *msg;
153 	size_t size;
154 	int ret;
155 
156 	size = struct_size(msg, elems, num_elems);
157 	if (size > SOF_IPC_MSG_MAX_SIZE)
158 		return -EINVAL;
159 
160 	msg = kmalloc(size, GFP_KERNEL);
161 	if (!msg)
162 		return -ENOMEM;
163 
164 	msg->hdr.size = size;
165 	msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
166 	msg->elem_cnt = num_elems;
167 	memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));
168 
169 	ret = pm_runtime_resume_and_get(sdev->dev);
170 	if (ret < 0 && ret != -EACCES) {
171 		dev_err(sdev->dev, "enabling device failed: %d\n", ret);
172 		goto error;
173 	}
174 	ret = sof_ipc_tx_message_no_reply(sdev->ipc, msg, msg->hdr.size);
175 	pm_runtime_mark_last_busy(sdev->dev);
176 	pm_runtime_put_autosuspend(sdev->dev);
177 
178 error:
179 	kfree(msg);
180 	return ret;
181 }
182 
183 static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
184 					   size_t count, loff_t *ppos)
185 {
186 	struct snd_sof_dfsentry *dfse = file->private_data;
187 	struct sof_ipc_trace_filter_elem *elems = NULL;
188 	struct snd_sof_dev *sdev = dfse->sdev;
189 	int num_elems;
190 	char *string;
191 	int ret;
192 
193 	if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
194 		dev_err(sdev->dev, "%s: input too long, %zu > %d\n", __func__, count,
195 			TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
196 		return -EINVAL;
197 	}
198 
199 	string = memdup_user_nul(from, count);
200 	if (IS_ERR(string))
201 		return PTR_ERR(string);
202 
203 	ret = trace_filter_parse(sdev, string, &num_elems, &elems);
204 	if (ret < 0)
205 		goto error;
206 
207 	if (num_elems) {
208 		ret = ipc3_trace_update_filter(sdev, num_elems, elems);
209 		if (ret < 0) {
210 			dev_err(sdev->dev, "Filter update failed: %d\n", ret);
211 			goto error;
212 		}
213 	}
214 	ret = count;
215 error:
216 	kfree(string);
217 	kfree(elems);
218 	return ret;
219 }
220 
221 static const struct file_operations sof_dfs_trace_filter_fops = {
222 	.open = simple_open,
223 	.write = dfsentry_trace_filter_write,
224 	.llseek = default_llseek,
225 };
226 
227 static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
228 {
229 	struct snd_sof_dfsentry *dfse;
230 
231 	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
232 	if (!dfse)
233 		return -ENOMEM;
234 
235 	dfse->sdev = sdev;
236 	dfse->type = SOF_DFSENTRY_TYPE_BUF;
237 
238 	debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
239 			    &sof_dfs_trace_filter_fops);
240 	/* add to dfsentry list */
241 	list_add(&dfse->list, &sdev->dfsentry_list);
242 
243 	return 0;
244 }
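
/*
 * The "filter" file is created under sdev->debugfs_root, which for SOF is
 * typically /sys/kernel/debug/sof. A hypothetical usage example, assuming
 * that default location:
 *
 *   echo "4 0 2 -1;" > /sys/kernel/debug/sof/filter
 *
 * which sets trace log level 4 for all components in pipeline 2.
 */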
245 
246 static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
247 {
248 	u32 host_offset = READ_ONCE(priv->host_offset);
249 
250 	if (host_offset != new_offset) {
251 		/* This is a bit paranoid and unlikely to ever be needed */
252 		u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);
253 
254 		if (ret == host_offset)
255 			return true;
256 	}
257 
258 	return false;
259 }
260 
261 static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
262 			       loff_t pos, size_t buffer_size)
263 {
264 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
265 	loff_t host_offset = READ_ONCE(priv->host_offset);
266 
267 	/*
268 	 * If the host offset is less than the local pos, the write pointer of
269 	 * the host DMA buffer has wrapped. Output the trace data between pos
270 	 * and the end of the host DMA buffer first.
271 	 */
272 	if (host_offset < pos)
273 		return buffer_size - pos;
274 
275 	/* If there is available trace data now, it is unnecessary to wait. */
276 	if (host_offset > pos)
277 		return host_offset - pos;
278 
279 	return 0;
280 }
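
/*
 * Worked example with made-up numbers: for buffer_size = 0x2000, pos = 0x1800
 * and host_offset = 0x400 the DSP write pointer has wrapped, so this returns
 * 0x2000 - 0x1800 = 0x800 and the tail of the buffer is read first; once pos
 * wraps back to 0, the next call returns host_offset - pos = 0x400.
 */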
281 
282 static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
283 				    size_t buffer_size)
284 {
285 	size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
286 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
287 	wait_queue_entry_t wait;
288 
289 	/* data immediately available */
290 	if (ret)
291 		return ret;
292 
293 	if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
294 		/*
295 		 * tracing has ended and all traces have been
296 		 * read by client, return EOF
297 		 */
298 		priv->dtrace_draining = false;
299 		return 0;
300 	}
301 
302 	/* wait for available trace data from FW */
303 	init_waitqueue_entry(&wait, current);
304 	set_current_state(TASK_INTERRUPTIBLE);
305 	add_wait_queue(&priv->trace_sleep, &wait);
306 
307 	if (!signal_pending(current)) {
308 		/* set timeout to max value, no error code */
309 		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
310 	}
311 	remove_wait_queue(&priv->trace_sleep, &wait);
312 
313 	return sof_dtrace_avail(sdev, pos, buffer_size);
314 }
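
/*
 * The wait above ends when ipc3_dtrace_posn_update() advances host_offset
 * and wakes trace_sleep, when ipc3_dtrace_release() or
 * ipc3_dtrace_fw_crashed() wake the queue, or when a signal is delivered to
 * the reading task.
 */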
315 
316 static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
317 				    size_t count, loff_t *ppos)
318 {
319 	struct snd_sof_dfsentry *dfse = file->private_data;
320 	struct snd_sof_dev *sdev = dfse->sdev;
321 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
322 	unsigned long rem;
323 	loff_t lpos = *ppos;
324 	size_t avail, buffer_size = dfse->size;
325 	u64 lpos_64;
326 
327 	/* make sure we know about any failures on the DSP side */
328 	priv->dtrace_error = false;
329 
330 	/* check pos and count */
331 	if (lpos < 0)
332 		return -EINVAL;
333 	if (!count)
334 		return 0;
335 
336 	/* check for buffer wrap and count overflow */
337 	lpos_64 = lpos;
338 	lpos = do_div(lpos_64, buffer_size);
339 
340 	/* get available count based on current host offset */
341 	avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
342 	if (priv->dtrace_error) {
343 		dev_err(sdev->dev, "trace IO error\n");
344 		return -EIO;
345 	}
346 
347 	/* no new trace data */
348 	if (!avail)
349 		return 0;
350 
351 	/* make sure count is <= avail */
352 	if (count > avail)
353 		count = avail;
354 
355 	/*
356 	 * make sure that all trace data is visible to the CPU, as the trace
357 	 * data buffer might be allocated from non-coherent memory.
358 	 * Note: snd_dma_buffer_sync() is also called for normal audio playback
359 	 *	 and capture streams.
360 	 */
361 	snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
362 	/* copy available trace data to debugfs */
363 	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
364 	if (rem)
365 		return -EFAULT;
366 
367 	/* move the debugfs read position forward */
368 	*ppos += count;
369 
370 	return count;
371 }
372 
373 static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
374 {
375 	struct snd_sof_dfsentry *dfse = inode->i_private;
376 	struct snd_sof_dev *sdev = dfse->sdev;
377 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
378 
379 	/* avoid duplicate traces at next open */
380 	if (priv->dtrace_state != SOF_DTRACE_ENABLED)
381 		sof_dtrace_set_host_offset(priv, 0);
382 
383 	return 0;
384 }
385 
386 static const struct file_operations sof_dfs_dtrace_fops = {
387 	.open = simple_open,
388 	.read = dfsentry_dtrace_read,
389 	.llseek = default_llseek,
390 	.release = dfsentry_dtrace_release,
391 };
392 
393 static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
394 {
395 	struct sof_dtrace_priv *priv;
396 	struct snd_sof_dfsentry *dfse;
397 	int ret;
398 
399 	if (!sdev)
400 		return -EINVAL;
401 
402 	priv = sdev->fw_trace_data;
403 
404 	ret = debugfs_create_trace_filter(sdev);
405 	if (ret < 0)
406 		dev_warn(sdev->dev, "failed to create filter debugfs file: %d\n", ret);
407 
408 	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
409 	if (!dfse)
410 		return -ENOMEM;
411 
412 	dfse->type = SOF_DFSENTRY_TYPE_BUF;
413 	dfse->buf = priv->dmatb.area;
414 	dfse->size = priv->dmatb.bytes;
415 	dfse->sdev = sdev;
416 
417 	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
418 			    &sof_dfs_dtrace_fops);
419 
420 	return 0;
421 }
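
/*
 * The "trace" file created above exposes the raw DMA trace buffer; a
 * hypothetical read, assuming the default debugfs location:
 *
 *   cat /sys/kernel/debug/sof/trace > dma_trace.bin
 *
 * The stream is binary firmware trace data and is normally decoded with a
 * host-side tool such as sof-logger rather than read by hand.
 */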
422 
423 static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
424 {
425 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
426 	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
427 	struct sof_ipc_fw_version *v = &ready->version;
428 	struct sof_ipc_dma_trace_params_ext params;
429 	int ret;
430 
431 	if (!sdev->fw_trace_is_supported)
432 		return 0;
433 
434 	if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
435 		return -EINVAL;
436 
437 	if (priv->dtrace_state == SOF_DTRACE_STOPPED)
438 		goto start;
439 
440 	/* set IPC parameters */
441 	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
442 	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
443 	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
444 		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
445 		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
446 		params.timestamp_ns = ktime_get(); /* in nanoseconds */
447 	} else {
448 		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
449 		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
450 	}
451 	params.buffer.phy_addr = priv->dmatp.addr;
452 	params.buffer.size = priv->dmatb.bytes;
453 	params.buffer.pages = priv->dma_trace_pages;
454 	params.stream_tag = 0;
455 
456 	sof_dtrace_set_host_offset(priv, 0);
457 	priv->dtrace_draining = false;
458 
459 	ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
460 	if (ret < 0) {
461 		dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
462 		return ret;
463 	}
464 	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
465 
466 	/* send IPC to the DSP */
467 	priv->dtrace_state = SOF_DTRACE_INITIALIZING;
468 	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &params, sizeof(params));
469 	if (ret < 0) {
470 		dev_err(sdev->dev, "can't set DMA trace params: %d\n", ret);
471 		goto trace_release;
472 	}
473 
474 start:
475 	priv->dtrace_state = SOF_DTRACE_ENABLED;
476 
477 	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
478 	if (ret < 0) {
479 		dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
480 		goto trace_release;
481 	}
482 
483 	return 0;
484 
485 trace_release:
486 	priv->dtrace_state = SOF_DTRACE_DISABLED;
487 	sof_dtrace_host_release(sdev);
488 	return ret;
489 }
490 
491 static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
492 {
493 	struct sof_dtrace_priv *priv;
494 	int ret;
495 
496 	/* dtrace is only supported with SOF_IPC */
497 	if (sdev->pdata->ipc_type != SOF_IPC_TYPE_3)
498 		return -EOPNOTSUPP;
499 
500 	if (sdev->fw_trace_data) {
501 		dev_err(sdev->dev, "fw_trace_data has already been allocated\n");
502 		return -EBUSY;
503 	}
504 
505 	priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
506 	if (!priv)
507 		return -ENOMEM;
508 
509 	sdev->fw_trace_data = priv;
510 
511 	/* set the state to disabled before starting initialization */
512 	priv->dtrace_state = SOF_DTRACE_DISABLED;
513 
514 	/* allocate trace page table buffer */
515 	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
516 				  PAGE_SIZE, &priv->dmatp);
517 	if (ret < 0) {
518 		dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
519 		return ret;
520 	}
521 
522 	/* allocate trace data buffer */
523 	ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
524 				      DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
525 				      &priv->dmatb);
526 	if (ret < 0) {
527 		dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
528 		goto page_err;
529 	}
530 
531 	/* create compressed page table for audio firmware */
532 	ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
533 					priv->dmatp.area, priv->dmatb.bytes);
534 	if (ret < 0)
535 		goto table_err;
536 
537 	priv->dma_trace_pages = ret;
538 	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);
539 
540 	if (sdev->first_boot) {
541 		ret = debugfs_create_dtrace(sdev);
542 		if (ret < 0)
543 			goto table_err;
544 	}
545 
546 	init_waitqueue_head(&priv->trace_sleep);
547 
548 	ret = ipc3_dtrace_enable(sdev);
549 	if (ret < 0)
550 		goto table_err;
551 
552 	return 0;
553 table_err:
554 	priv->dma_trace_pages = 0;
555 	snd_dma_free_pages(&priv->dmatb);
556 page_err:
557 	snd_dma_free_pages(&priv->dmatp);
558 	return ret;
559 }
560 
561 int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
562 			    struct sof_ipc_dma_trace_posn *posn)
563 {
564 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
565 
566 	if (!sdev->fw_trace_is_supported)
567 		return 0;
568 
569 	if (trace_pos_update_expected(priv) &&
570 	    sof_dtrace_set_host_offset(priv, posn->host_offset))
571 		wake_up(&priv->trace_sleep);
572 
573 	if (posn->overflow != 0)
574 		dev_err(sdev->dev,
575 			"DSP trace buffer overflow %u bytes. Total messages %d\n",
576 			posn->overflow, posn->messages);
577 
578 	return 0;
579 }
580 
581 /* an error has occurred within the DSP that prevents further trace */
582 static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
583 {
584 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
585 
586 	if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
587 		priv->dtrace_error = true;
588 		wake_up(&priv->trace_sleep);
589 	}
590 }
591 
592 static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
593 {
594 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
595 	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
596 	struct sof_ipc_fw_version *v = &ready->version;
597 	struct sof_ipc_cmd_hdr hdr;
598 	int ret;
599 
600 	if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
601 		return;
602 
603 	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
604 	if (ret < 0)
605 		dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
606 	priv->dtrace_state = SOF_DTRACE_STOPPED;
607 
608 	/*
609 	 * stop and free trace DMA in the DSP. TRACE_DMA_FREE is only supported from
610 	 * ABI 3.20.0 onwards
611 	 */
612 	if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
613 		hdr.size = sizeof(hdr);
614 		hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;
615 
616 		ret = sof_ipc_tx_message_no_reply(sdev->ipc, &hdr, hdr.size);
617 		if (ret < 0)
618 			dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
619 	}
620 
621 	if (only_stop)
622 		goto out;
623 
624 	ret = sof_dtrace_host_release(sdev);
625 	if (ret < 0)
626 		dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);
627 
628 	priv->dtrace_state = SOF_DTRACE_DISABLED;
629 
630 out:
631 	priv->dtrace_draining = true;
632 	wake_up(&priv->trace_sleep);
633 }
634 
635 static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
636 {
637 	ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
638 }
639 
640 static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
641 {
642 	return ipc3_dtrace_enable(sdev);
643 }
644 
645 static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
646 {
647 	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
648 
649 	/* release trace */
650 	ipc3_dtrace_release(sdev, false);
651 
652 	if (priv->dma_trace_pages) {
653 		snd_dma_free_pages(&priv->dmatb);
654 		snd_dma_free_pages(&priv->dmatp);
655 		priv->dma_trace_pages = 0;
656 	}
657 }
658 
659 const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
660 	.init = ipc3_dtrace_init,
661 	.free = ipc3_dtrace_free,
662 	.fw_crashed = ipc3_dtrace_fw_crashed,
663 	.suspend = ipc3_dtrace_suspend,
664 	.resume = ipc3_dtrace_resume,
665 };
666