// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "sof-audio.h"
#include "ops.h"
#include "sof-utils.h"
#include "ipc3-priv.h"

#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024

enum sof_dtrace_state {
	SOF_DTRACE_DISABLED,
	SOF_DTRACE_STOPPED,
	SOF_DTRACE_INITIALIZING,
	SOF_DTRACE_ENABLED,
};

struct sof_dtrace_priv {
	struct snd_dma_buffer dmatb;
	struct snd_dma_buffer dmatp;
	int dma_trace_pages;
	wait_queue_head_t trace_sleep;
	u32 host_offset;
	bool dtrace_error;
	bool dtrace_draining;
	enum sof_dtrace_state dtrace_state;
};

static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
{
	if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
	    priv->dtrace_state == SOF_DTRACE_INITIALIZING)
		return true;

	return false;
}

static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
				    struct sof_ipc_trace_filter_elem *elem_list,
				    int capacity, int *counter)
{
	if (*counter >= capacity)
		return -ENOMEM;

	elem_list[*counter].key = key;
	elem_list[*counter].value = value;
	++*counter;

	return 0;
}

static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
				    struct sof_ipc_trace_filter_elem *elem,
				    int capacity, int *counter)
{
	int log_level, pipe_id, comp_id, read, ret;
	int len = strlen(line);
	int cnt = *counter;
	u32 uuid_id;

	/* ignore empty content */
	ret = sscanf(line, " %n", &read);
	if (!ret && read == len)
		return len;

	ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
	if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
		dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
		return -EINVAL;
	}

	if (uuid_id > 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
					       uuid_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (pipe_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
					       pipe_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (comp_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
					       comp_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}

	ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
				       SOF_IPC_TRACE_FILTER_ELEM_FIN,
				       log_level, elem, capacity, &cnt);
	if (ret)
		return ret;

	/* update the counter only when the whole entry was parsed successfully */
	*counter = cnt;

	return len;
}

static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
			      int *out_elem_cnt,
			      struct sof_ipc_trace_filter_elem **out)
{
	static const char entry_delimiter[] = ";";
	char *entry = string;
	int capacity = 0;
	int entry_len;
	int cnt = 0;

	/*
	 * Each entry contains at least 1, up to TRACE_FILTER_ELEMENTS_PER_ENTRY
	 * IPC elements, depending on content. Calculate IPC elements capacity
	 * for the input string where each element is set.
	 */
	while (entry) {
		capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
		entry = strchr(entry + 1, entry_delimiter[0]);
	}
	*out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	/* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
	while ((entry = strsep(&string, entry_delimiter))) {
		entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
		if (entry_len < 0) {
			dev_err(sdev->dev,
				"Parsing filter entry '%s' failed with %d\n",
				entry, entry_len);
			kfree(*out);
			return -EINVAL;
		}
	}

	*out_elem_cnt = cnt;

	return 0;
}

static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
				    struct sof_ipc_trace_filter_elem *elems)
{
	struct sof_ipc_trace_filter *msg;
	size_t size;
	int ret;

	size = struct_size(msg, elems, num_elems);
	if (size > SOF_IPC_MSG_MAX_SIZE)
		return -EINVAL;

	msg = kmalloc(size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.size = size;
	msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
	msg->elem_cnt = num_elems;
	memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));

	ret = pm_runtime_resume_and_get(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(sdev->dev, "enabling device failed: %d\n", ret);
		goto error;
	}
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, msg, msg->hdr.size);
	pm_runtime_mark_last_busy(sdev->dev);
	pm_runtime_put_autosuspend(sdev->dev);

error:
	kfree(msg);
	return ret;
}

static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
					   size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct sof_ipc_trace_filter_elem *elems = NULL;
	struct snd_sof_dev *sdev = dfse->sdev;
	int num_elems;
	char *string;
	int ret;

	if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
		dev_err(sdev->dev, "%s too long input, %zu > %d\n", __func__, count,
			TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
		return -EINVAL;
	}

	string = memdup_user_nul(from, count);
	if (IS_ERR(string))
		return PTR_ERR(string);

	ret = trace_filter_parse(sdev, string, &num_elems, &elems);
	if (ret < 0)
		goto error;

	if (num_elems) {
		ret = ipc3_trace_update_filter(sdev, num_elems, elems);
		if (ret < 0) {
			dev_err(sdev->dev, "Filter update failed: %d\n", ret);
			kfree(elems);
			goto error;
		}
	}
	ret = count;
error:
	kfree(string);
	return ret;
}

static const struct file_operations sof_dfs_trace_filter_fops = {
	.open = simple_open,
	.write = dfsentry_trace_filter_write,
	.llseek = default_llseek,
};

static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->sdev = sdev;
	dfse->type = SOF_DFSENTRY_TYPE_BUF;

	debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_filter_fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
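
/*
 * The "filter" debugfs file accepts ';'-separated entries of the form
 * "<log_level> <uuid> <pipe_id> <comp_id>", matching the sscanf() format in
 * trace_filter_parse_entry(): uuid is parsed as hex, and a uuid of 0 or a
 * negative pipeline/component id means "not set". Illustrative example (the
 * debugfs path depends on where debugfs is mounted):
 *
 *   echo "4 0 -1 -1;" > /sys/kernel/debug/sof/filter
 *
 * requests log level 4 with no uuid/pipeline/component qualifier, i.e. a
 * single SET_LEVEL | FIN element is sent to the firmware.
 */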

static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
{
	u32 host_offset = READ_ONCE(priv->host_offset);

	if (host_offset != new_offset) {
		/* This is a bit paranoid, and unlikely to be needed */
		u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);

		if (ret == host_offset)
			return true;
	}

	return false;
}

static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
			       loff_t pos, size_t buffer_size)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	loff_t host_offset = READ_ONCE(priv->host_offset);

	/*
	 * If the host offset is less than the local pos, the write pointer of
	 * the host DMA buffer has wrapped. Output the trace data at the end of
	 * the host DMA buffer first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If there is available trace data now, it is unnecessary to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
				    size_t buffer_size)
{
	size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	wait_queue_entry_t wait;

	/* data immediately available */
	if (ret)
		return ret;

	if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
		/*
		 * tracing has ended and all traces have been
		 * read by the client, return EOF
		 */
		priv->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&priv->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&priv->trace_sleep, &wait);

	return sof_dtrace_avail(sdev, pos, buffer_size);
}
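
/*
 * Note on read positions (illustrative numbers, not taken from the code): the
 * debugfs file position (*ppos) normally grows monotonically, while the
 * firmware write pointer (host_offset) wraps inside the DMA buffer.
 * dfsentry_dtrace_read() below maps *ppos into the ring buffer with a modulo:
 * with a 16 kB buffer and *ppos == 20 kB the local offset is 4 kB, and if
 * host_offset has wrapped to 1 kB, sof_dtrace_avail() first returns the 12 kB
 * up to the end of the buffer before the next read restarts at offset 0.
 */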

static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
				    size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	priv->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	/* get available count based on current host offset */
	avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
	if (priv->dtrace_error) {
		dev_err(sdev->dev, "trace IO error\n");
		return -EIO;
	}

	/* no new trace data */
	if (!avail)
		return 0;

	/* make sure count is <= avail */
	if (count > avail)
		count = avail;

	/*
	 * make sure that all trace data is available for the CPU as the trace
	 * data buffer might be allocated from non consistent memory.
	 * Note: snd_dma_buffer_sync() is called for normal audio playback and
	 * capture streams also.
	 */
	snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	*ppos += count;

	/* move debugfs reading position */
	return count;
}

static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* avoid duplicate traces at next open */
	if (priv->dtrace_state != SOF_DTRACE_ENABLED)
		sof_dtrace_set_host_offset(priv, 0);

	return 0;
}

static const struct file_operations sof_dfs_dtrace_fops = {
	.open = simple_open,
	.read = dfsentry_dtrace_read,
	.llseek = default_llseek,
	.release = dfsentry_dtrace_release,
};

static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	struct snd_sof_dfsentry *dfse;
	int ret;

	if (!sdev)
		return -EINVAL;

	priv = sdev->fw_trace_data;

	ret = debugfs_create_trace_filter(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "failed to create filter debugfs file: %d", ret);

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = priv->dmatb.area;
	dfse->size = priv->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_dtrace_fops);

	return 0;
}
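
/*
 * The read-only "trace" file exposes the DMA trace buffer. Illustrative usage
 * (the path depends on the debugfs mount point):
 *
 *   cat /sys/kernel/debug/sof/trace
 *
 * Reads block in sof_wait_dtrace_avail() until the firmware reports a new
 * write position, and return 0 (EOF) once tracing has stopped and the
 * remaining data has been drained.
 */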

static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	int ret;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
		return -EINVAL;

	if (priv->dtrace_state == SOF_DTRACE_STOPPED)
		goto start;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanoseconds */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = priv->dmatp.addr;
	params.buffer.size = priv->dmatb.bytes;
	params.buffer.pages = priv->dma_trace_pages;
	params.stream_tag = 0;

	sof_dtrace_set_host_offset(priv, 0);
	priv->dtrace_draining = false;

	ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

	/* send IPC to the DSP */
	priv->dtrace_state = SOF_DTRACE_INITIALIZING;
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &params, sizeof(params));
	if (ret < 0) {
		dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

start:
	priv->dtrace_state = SOF_DTRACE_ENABLED;

	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
		goto trace_release;
	}

	return 0;

trace_release:
	priv->dtrace_state = SOF_DTRACE_DISABLED;
	sof_dtrace_host_release(sdev);
	return ret;
}

static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	int ret;

	/* dtrace is only supported with SOF_IPC */
	if (sdev->pdata->ipc_type != SOF_IPC_TYPE_3)
		return -EOPNOTSUPP;

	if (sdev->fw_trace_data) {
		dev_err(sdev->dev, "fw_trace_data has already been allocated\n");
		return -EBUSY;
	}

	priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sdev->fw_trace_data = priv;

	/* mark as disabled before starting initialization */
	priv->dtrace_state = SOF_DTRACE_DISABLED;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &priv->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				      DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
				      &priv->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
					priv->dmatp.area, priv->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	priv->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);

	if (sdev->first_boot) {
		ret = debugfs_create_dtrace(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&priv->trace_sleep);

	ret = ipc3_dtrace_enable(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	priv->dma_trace_pages = 0;
	snd_dma_free_pages(&priv->dmatb);
page_err:
	snd_dma_free_pages(&priv->dmatp);
	return ret;
}
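
/*
 * Called when the firmware posts a DMA trace position update over IPC
 * (typically from the IPC3 rx handler): record the new host write offset and
 * wake up any reader blocked in sof_wait_dtrace_avail().
 */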
int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
			    struct sof_ipc_dma_trace_posn *posn)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (trace_pos_update_expected(priv) &&
	    sof_dtrace_set_host_offset(priv, posn->host_offset))
		wake_up(&priv->trace_sleep);

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
		priv->dtrace_error = true;
		wake_up(&priv->trace_sleep);
	}
}

static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_cmd_hdr hdr;
	int ret;

	if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
		return;

	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
	priv->dtrace_state = SOF_DTRACE_STOPPED;

	/*
	 * stop and free trace DMA in the DSP. TRACE_DMA_FREE is only supported from
	 * ABI 3.20.0 onwards
	 */
	if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
		hdr.size = sizeof(hdr);
		hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;

		ret = sof_ipc_tx_message_no_reply(sdev->ipc, &hdr, hdr.size);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
	}

	if (only_stop)
		goto out;

	ret = sof_dtrace_host_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);

	priv->dtrace_state = SOF_DTRACE_DISABLED;

out:
	priv->dtrace_draining = true;
	wake_up(&priv->trace_sleep);
}

static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
{
	ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
}

static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
{
	return ipc3_dtrace_enable(sdev);
}

static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* release trace */
	ipc3_dtrace_release(sdev, false);

	if (priv->dma_trace_pages) {
		snd_dma_free_pages(&priv->dmatb);
		snd_dma_free_pages(&priv->dmatp);
		priv->dma_trace_pages = 0;
	}
}

const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
	.init = ipc3_dtrace_init,
	.free = ipc3_dtrace_free,
	.fw_crashed = ipc3_dtrace_fw_crashed,
	.suspend = ipc3_dtrace_suspend,
	.resume = ipc3_dtrace_resume,
};