// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "sof-audio.h"
#include "ops.h"
#include "sof-utils.h"
#include "ipc3-priv.h"

#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024

enum sof_dtrace_state {
	SOF_DTRACE_DISABLED,
	SOF_DTRACE_STOPPED,
	SOF_DTRACE_INITIALIZING,
	SOF_DTRACE_ENABLED,
};

struct sof_dtrace_priv {
	struct snd_dma_buffer dmatb;
	struct snd_dma_buffer dmatp;
	int dma_trace_pages;
	wait_queue_head_t trace_sleep;
	u32 host_offset;
	bool dtrace_error;
	bool dtrace_draining;
	enum sof_dtrace_state dtrace_state;
};

static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
{
	if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
	    priv->dtrace_state == SOF_DTRACE_INITIALIZING)
		return true;

	return false;
}

static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
				    struct sof_ipc_trace_filter_elem *elem_list,
				    int capacity, int *counter)
{
	if (*counter >= capacity)
		return -ENOMEM;

	elem_list[*counter].key = key;
	elem_list[*counter].value = value;
	++*counter;

	return 0;
}

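/*
 * A filter string is a list of ';'-separated entries. Each entry holds four
 * whitespace separated fields: "<log_level> <uuid (hex)> <pipe_id> <comp_id>".
 * A uuid of 0 and a negative pipe or component id mean "not used", so e.g.
 * "4 0 -1 -1;" sets the log level without narrowing by uuid, pipeline or
 * component.
 */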
static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
				    struct sof_ipc_trace_filter_elem *elem,
				    int capacity, int *counter)
{
	int log_level, pipe_id, comp_id, read, ret;
	int len = strlen(line);
	int cnt = *counter;
	u32 uuid_id;

	/* ignore empty content */
	ret = sscanf(line, " %n", &read);
	if (!ret && read == len)
		return len;

	ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
	if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
		dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
		return -EINVAL;
	}

	if (uuid_id > 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
					       uuid_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (pipe_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
					       pipe_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (comp_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
					       comp_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}

	ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
				       SOF_IPC_TRACE_FILTER_ELEM_FIN,
				       log_level, elem, capacity, &cnt);
	if (ret)
		return ret;

	/* update the counter only when the whole entry parsed successfully */
	*counter = cnt;

	return len;
}

static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
			      int *out_elem_cnt,
			      struct sof_ipc_trace_filter_elem **out)
{
	static const char entry_delimiter[] = ";";
	char *entry = string;
	int capacity = 0;
	int entry_len;
	int cnt = 0;

	/*
	 * Each entry contains at least 1 and up to TRACE_FILTER_ELEMENTS_PER_ENTRY
	 * IPC elements, depending on its content. Calculate the IPC element
	 * capacity for the input string assuming every element is set.
	 */
	while (entry) {
		capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
		entry = strchr(entry + 1, entry_delimiter[0]);
	}
	*out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	/* split the input string by ';' and parse each entry separately in trace_filter_parse_entry() */
	while ((entry = strsep(&string, entry_delimiter))) {
		entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
		if (entry_len < 0) {
			dev_err(sdev->dev,
				"Parsing filter entry '%s' failed with %d\n",
				entry, entry_len);
			return -EINVAL;
		}
	}

	*out_elem_cnt = cnt;

	return 0;
}

static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
				    struct sof_ipc_trace_filter_elem *elems)
{
	struct sof_ipc_trace_filter *msg;
	struct sof_ipc_reply reply;
	size_t size;
	int ret;

	size = struct_size(msg, elems, num_elems);
	if (size > SOF_IPC_MSG_MAX_SIZE)
		return -EINVAL;

	msg = kmalloc(size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.size = size;
	msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
	msg->elem_cnt = num_elems;
	memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));

	ret = pm_runtime_resume_and_get(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(sdev->dev, "enabling device failed: %d\n", ret);
		goto error;
	}
	ret = sof_ipc_tx_message(sdev->ipc, msg, msg->hdr.size, &reply, sizeof(reply));
	pm_runtime_mark_last_busy(sdev->dev);
	pm_runtime_put_autosuspend(sdev->dev);

error:
	kfree(msg);
	return ret ? ret : reply.error;
}

static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
					   size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct sof_ipc_trace_filter_elem *elems = NULL;
	struct snd_sof_dev *sdev = dfse->sdev;
	loff_t pos = 0;
	int num_elems;
	char *string;
	int ret;

	if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
		dev_err(sdev->dev, "%s too long input, %zu > %d\n", __func__, count,
			TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
		return -EINVAL;
	}

	string = kmalloc(count + 1, GFP_KERNEL);
	if (!string)
		return -ENOMEM;

	/* ensure NUL termination */
	string[count] = 0;
	ret = simple_write_to_buffer(string, count, &pos, from, count);
	if (ret < 0)
		goto error;

	ret = trace_filter_parse(sdev, string, &num_elems, &elems);
	if (ret < 0)
		goto error;

	if (num_elems) {
		ret = ipc3_trace_update_filter(sdev, num_elems, elems);
		if (ret < 0) {
			dev_err(sdev->dev, "Filter update failed: %d\n", ret);
			goto error;
		}
	}
	ret = count;
error:
	kfree(string);
	kfree(elems);
	return ret;
}

static const struct file_operations sof_dfs_trace_filter_fops = {
	.open = simple_open,
	.write = dfsentry_trace_filter_write,
	.llseek = default_llseek,
};

static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->sdev = sdev;
	dfse->type = SOF_DFSENTRY_TYPE_BUF;

	debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_filter_fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}

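/*
 * Read side of the "trace" debugfs file: the firmware advances host_offset
 * through trace position IPC updates (ipc3_dtrace_posn_update()), and readers
 * sleep on trace_sleep until new data, a DSP error or draining is signalled.
 */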
static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
{
	u32 host_offset = READ_ONCE(priv->host_offset);

	if (host_offset != new_offset) {
		/* This is a bit paranoid and unlikely to be needed */
		u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);

		if (ret == host_offset)
			return true;
	}

	return false;
}

static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
			       loff_t pos, size_t buffer_size)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	loff_t host_offset = READ_ONCE(priv->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer of
	 * the host DMA buffer has wrapped. Output the trace data at the end of
	 * the host DMA buffer first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If there is trace data available now, there is no need to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
				    size_t buffer_size)
{
	size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	wait_queue_entry_t wait;

	/* data immediately available */
	if (ret)
		return ret;

	if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
		/*
		 * tracing has ended and all traces have been
		 * read by the client, return EOF
		 */
		priv->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from the FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&priv->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&priv->trace_sleep, &wait);

	return sof_dtrace_avail(sdev, pos, buffer_size);
}

static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
				    size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	priv->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	/* get available count based on current host offset */
	avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
	if (priv->dtrace_error) {
		dev_err(sdev->dev, "trace IO error\n");
		return -EIO;
	}

	/* no new trace data */
	if (!avail)
		return 0;

	/* make sure count is <= avail */
	if (count > avail)
		count = avail;

	/*
	 * make sure that all trace data is available for the CPU as the trace
	 * data buffer might be allocated from non-consistent memory.
	 * Note: snd_dma_buffer_sync() is called for normal audio playback and
	 * capture streams also.
	 */
	snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	/* move debugfs reading position */
	*ppos += count;

	return count;
}

static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* avoid duplicate traces at next open */
	if (priv->dtrace_state != SOF_DTRACE_ENABLED)
		sof_dtrace_set_host_offset(priv, 0);

	return 0;
}

static const struct file_operations sof_dfs_dtrace_fops = {
	.open = simple_open,
	.read = dfsentry_dtrace_read,
	.llseek = default_llseek,
	.release = dfsentry_dtrace_release,
};

static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	struct snd_sof_dfsentry *dfse;
	int ret;

	if (!sdev)
		return -EINVAL;

	priv = sdev->fw_trace_data;

	ret = debugfs_create_trace_filter(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "failed to create filter debugfs file: %d\n", ret);

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = priv->dmatb.area;
	dfse->size = priv->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_dtrace_fops);

	return 0;
}

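/*
 * Set up the host DMA and ask the firmware to start tracing. When the trace
 * was merely stopped (SOF_DTRACE_STOPPED) only the host side is re-triggered;
 * the DMA parameters IPC is skipped.
 */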
static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
		return -EINVAL;

	if (priv->dtrace_state == SOF_DTRACE_STOPPED)
		goto start;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = priv->dmatp.addr;
	params.buffer.size = priv->dmatb.bytes;
	params.buffer.pages = priv->dma_trace_pages;
	params.stream_tag = 0;

	sof_dtrace_set_host_offset(priv, 0);
	priv->dtrace_draining = false;

	ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	priv->dtrace_state = SOF_DTRACE_INITIALIZING;
	ret = sof_ipc_tx_message(sdev->ipc, &params, sizeof(params), &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

start:
	priv->dtrace_state = SOF_DTRACE_ENABLED;

	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
		goto trace_release;
	}

	return 0;

trace_release:
	priv->dtrace_state = SOF_DTRACE_DISABLED;
	sof_dtrace_host_release(sdev);
	return ret;
}

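/*
 * Allocate the page table and the SG trace data buffer, then enable tracing.
 * On failure the buffers are unwound in reverse order; otherwise they are
 * freed in ipc3_dtrace_free().
 */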
static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	int ret;

	/* dtrace is only supported with SOF_IPC */
	if (sdev->pdata->ipc_type != SOF_IPC)
		return -EOPNOTSUPP;

	if (sdev->fw_trace_data) {
		dev_err(sdev->dev, "fw_trace_data has already been allocated\n");
		return -EBUSY;
	}

	priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sdev->fw_trace_data = priv;

	/* disabled until initialization is complete */
	priv->dtrace_state = SOF_DTRACE_DISABLED;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &priv->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				      DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
				      &priv->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
					priv->dmatp.area, priv->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	priv->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);

	if (sdev->first_boot) {
		ret = debugfs_create_dtrace(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&priv->trace_sleep);

	ret = ipc3_dtrace_enable(sdev);
	if (ret < 0)
		goto table_err;

	return 0;

table_err:
	priv->dma_trace_pages = 0;
	snd_dma_free_pages(&priv->dmatb);
page_err:
	snd_dma_free_pages(&priv->dmatp);
	return ret;
}

int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
			    struct sof_ipc_dma_trace_posn *posn)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (trace_pos_update_expected(priv) &&
	    sof_dtrace_set_host_offset(priv, posn->host_offset))
		wake_up(&priv->trace_sleep);

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
		priv->dtrace_error = true;
		wake_up(&priv->trace_sleep);
	}
}

static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_cmd_hdr hdr;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
		return;

	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
	priv->dtrace_state = SOF_DTRACE_STOPPED;

	/*
	 * Stop and free the trace DMA in the DSP. TRACE_DMA_FREE is only
	 * supported from ABI 3.20.0 onwards.
	 */
	if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
		hdr.size = sizeof(hdr);
		hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;

		ret = sof_ipc_tx_message(sdev->ipc, &hdr, hdr.size,
					 &ipc_reply, sizeof(ipc_reply));
		if (ret < 0)
			dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
	}

	if (only_stop)
		goto out;

	ret = sof_dtrace_host_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);

	priv->dtrace_state = SOF_DTRACE_DISABLED;

out:
	priv->dtrace_draining = true;
	wake_up(&priv->trace_sleep);
}

static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
{
	ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
}

static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
{
	return ipc3_dtrace_enable(sdev);
}

static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* release trace */
	ipc3_dtrace_release(sdev, false);

	if (priv->dma_trace_pages) {
		snd_dma_free_pages(&priv->dmatb);
		snd_dma_free_pages(&priv->dmatp);
		priv->dma_trace_pages = 0;
	}
}

const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
	.init = ipc3_dtrace_init,
	.free = ipc3_dtrace_free,
	.fw_crashed = ipc3_dtrace_fw_crashed,
	.suspend = ipc3_dtrace_suspend,
	.resume = ipc3_dtrace_resume,
};