// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "sof-audio.h"
#include "ops.h"
#include "sof-utils.h"
#include "ipc3-priv.h"

#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024

enum sof_dtrace_state {
	SOF_DTRACE_DISABLED,
	SOF_DTRACE_STOPPED,
	SOF_DTRACE_ENABLED,
};

struct sof_dtrace_priv {
	struct snd_dma_buffer dmatb;
	struct snd_dma_buffer dmatp;
	int dma_trace_pages;
	wait_queue_head_t trace_sleep;
	u32 host_offset;
	bool dtrace_error;
	bool dtrace_draining;
	enum sof_dtrace_state dtrace_state;
};

static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
				    struct sof_ipc_trace_filter_elem *elem_list,
				    int capacity, int *counter)
{
	if (*counter >= capacity)
		return -ENOMEM;

	elem_list[*counter].key = key;
	elem_list[*counter].value = value;
	++*counter;

	return 0;
}
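
/*
 * Parse one ';'-separated filter entry of the form
 * "<log_level> <uuid (hex)> <pipe_id> <comp_id>". A UUID of 0 or a
 * negative pipeline/component ID means that selector is not used; the
 * log level element is appended last with SOF_IPC_TRACE_FILTER_ELEM_FIN
 * to terminate the entry.
 */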
static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
				    struct sof_ipc_trace_filter_elem *elem,
				    int capacity, int *counter)
{
	int log_level, pipe_id, comp_id, read, ret;
	int len = strlen(line);
	int cnt = *counter;
	u32 uuid_id;

	/* ignore empty content */
	ret = sscanf(line, " %n", &read);
	if (!ret && read == len)
		return len;

	ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
	if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
		dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
		return -EINVAL;
	}

	if (uuid_id > 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
					       uuid_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (pipe_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
					       pipe_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}
	if (comp_id >= 0) {
		ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
					       comp_id, elem, capacity, &cnt);
		if (ret)
			return ret;
	}

	ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
				       SOF_IPC_TRACE_FILTER_ELEM_FIN,
				       log_level, elem, capacity, &cnt);
	if (ret)
		return ret;

	/* update counter only when parsing whole entry passed */
	*counter = cnt;

	return len;
}

static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
			      int *out_elem_cnt,
			      struct sof_ipc_trace_filter_elem **out)
{
	static const char entry_delimiter[] = ";";
	char *entry = string;
	int capacity = 0;
	int entry_len;
	int cnt = 0;

	/*
	 * Each entry contains at least 1, up to TRACE_FILTER_ELEMENTS_PER_ENTRY
	 * IPC elements, depending on content. Calculate IPC elements capacity
	 * for the input string where each element is set.
	 */
	while (entry) {
		capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
		entry = strchr(entry + 1, entry_delimiter[0]);
	}
	*out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	/* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
	while ((entry = strsep(&string, entry_delimiter))) {
		entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
		if (entry_len < 0) {
			dev_err(sdev->dev,
				"Parsing filter entry '%s' failed with %d\n",
				entry, entry_len);
			return -EINVAL;
		}
	}

	*out_elem_cnt = cnt;

	return 0;
}

static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
				    struct sof_ipc_trace_filter_elem *elems)
{
	struct sof_ipc_trace_filter *msg;
	struct sof_ipc_reply reply;
	size_t size;
	int ret;

	size = struct_size(msg, elems, num_elems);
	if (size > SOF_IPC_MSG_MAX_SIZE)
		return -EINVAL;

	msg = kmalloc(size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.size = size;
	msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
	msg->elem_cnt = num_elems;
	memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));

	ret = pm_runtime_get_sync(sdev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(sdev->dev);
		dev_err(sdev->dev, "enabling device failed: %d\n", ret);
		goto error;
	}
	ret = sof_ipc_tx_message(sdev->ipc, msg, msg->hdr.size, &reply, sizeof(reply));
	pm_runtime_mark_last_busy(sdev->dev);
	pm_runtime_put_autosuspend(sdev->dev);

error:
	kfree(msg);
	return ret ? ret : reply.error;
}

static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
					   size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct sof_ipc_trace_filter_elem *elems = NULL;
	struct snd_sof_dev *sdev = dfse->sdev;
	int num_elems;
	char *string;
	int ret;

	if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
		dev_err(sdev->dev, "%s too long input, %zu > %d\n", __func__, count,
			TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
		return -EINVAL;
	}

	string = kmalloc(count + 1, GFP_KERNEL);
	if (!string)
		return -ENOMEM;

	/* assert null termination */
	string[count] = 0;

	/*
	 * copy the whole buffer in one go so no uninitialized bytes are
	 * left behind by a partial copy
	 */
	if (copy_from_user(string, from, count)) {
		ret = -EFAULT;
		goto error;
	}

	ret = trace_filter_parse(sdev, string, &num_elems, &elems);
	if (ret < 0)
		goto error;

	if (num_elems) {
		ret = ipc3_trace_update_filter(sdev, num_elems, elems);
		if (ret < 0) {
			dev_err(sdev->dev, "Filter update failed: %d\n", ret);
			goto error;
		}
	}
	ret = count;
error:
	kfree(string);
	kfree(elems);
	return ret;
}
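
/*
 * dfsentry_trace_filter_write() above backs the debugfs "filter" file.
 * Example (assuming the usual SOF debugfs root under /sys/kernel/debug):
 *
 *   echo "3 0 2 -1;" > /sys/kernel/debug/sof/filter
 *
 * sends a BY_PIPE(2) element followed by SET_LEVEL(3), i.e. requests log
 * level 3 for components in pipeline 2. Several ';'-separated entries can
 * be written in one go.
 */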
static const struct file_operations sof_dfs_trace_filter_fops = {
	.open = simple_open,
	.write = dfsentry_trace_filter_write,
	.llseek = default_llseek,
};

static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
{
	struct snd_sof_dfsentry *dfse;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->sdev = sdev;
	dfse->type = SOF_DFSENTRY_TYPE_BUF;

	debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
			    &sof_dfs_trace_filter_fops);
	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
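
/*
 * Return the number of bytes that can be read starting at @pos.
 * priv->host_offset is the firmware write position in the circular host
 * DMA buffer; when it has wrapped below @pos, the tail of the buffer is
 * returned first and the reader continues from offset 0 on the next read.
 */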
static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
			       loff_t pos, size_t buffer_size)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	loff_t host_offset = READ_ONCE(priv->host_offset);

	/*
	 * If host offset is less than local pos, it means write pointer of
	 * host DMA buffer has been wrapped. We should output the trace data
	 * at the end of host DMA buffer at first.
	 */
	if (host_offset < pos)
		return buffer_size - pos;

	/* If there is available trace data now, it is unnecessary to wait. */
	if (host_offset > pos)
		return host_offset - pos;

	return 0;
}

static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
				    size_t buffer_size)
{
	size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	wait_queue_entry_t wait;

	/* data immediately available */
	if (ret)
		return ret;

	if (priv->dtrace_state != SOF_DTRACE_ENABLED && priv->dtrace_draining) {
		/*
		 * tracing has ended and all traces have been
		 * read by client, return EOF
		 */
		priv->dtrace_draining = false;
		return 0;
	}

	/* wait for available trace data from FW */
	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&priv->trace_sleep, &wait);

	if (!signal_pending(current)) {
		/* set timeout to max value, no error code */
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&priv->trace_sleep, &wait);

	return sof_dtrace_avail(sdev, pos, buffer_size);
}

static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
				    size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	unsigned long rem;
	loff_t lpos = *ppos;
	size_t avail, buffer_size = dfse->size;
	u64 lpos_64;

	/* make sure we know about any failures on the DSP side */
	priv->dtrace_error = false;

	/* check pos and count */
	if (lpos < 0)
		return -EINVAL;
	if (!count)
		return 0;

	/* check for buffer wrap and count overflow */
	lpos_64 = lpos;
	lpos = do_div(lpos_64, buffer_size);

	/* get available count based on current host offset */
	avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
	if (priv->dtrace_error) {
		dev_err(sdev->dev, "trace IO error\n");
		return -EIO;
	}

	/* make sure count is <= avail */
	if (count > avail)
		count = avail;

	/*
	 * make sure that all trace data is available for the CPU as the trace
	 * data buffer might be allocated from non consistent memory.
	 * Note: snd_dma_buffer_sync() is called for normal audio playback and
	 * capture streams also.
	 */
	snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
	/* copy available trace data to debugfs */
	rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
	if (rem)
		return -EFAULT;

	*ppos += count;

	/* move debugfs reading position */
	return count;
}

static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
{
	struct snd_sof_dfsentry *dfse = inode->i_private;
	struct snd_sof_dev *sdev = dfse->sdev;
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* avoid duplicate traces at next open */
	if (priv->dtrace_state != SOF_DTRACE_ENABLED)
		priv->host_offset = 0;

	return 0;
}

static const struct file_operations sof_dfs_dtrace_fops = {
	.open = simple_open,
	.read = dfsentry_dtrace_read,
	.llseek = default_llseek,
	.release = dfsentry_dtrace_release,
};

static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	struct snd_sof_dfsentry *dfse;
	int ret;

	if (!sdev)
		return -EINVAL;

	priv = sdev->fw_trace_data;

	ret = debugfs_create_trace_filter(sdev);
	if (ret < 0)
		dev_warn(sdev->dev, "failed to create filter debugfs file: %d", ret);

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = priv->dmatb.area;
	dfse->size = priv->dmatb.bytes;
	dfse->sdev = sdev;

	debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_dtrace_fops);

	return 0;
}

static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_dma_trace_params_ext params;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
		return -EINVAL;

	if (priv->dtrace_state == SOF_DTRACE_STOPPED)
		goto start;

	/* set IPC parameters */
	params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
	/* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
	if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
		params.timestamp_ns = ktime_get(); /* in nanosecond */
	} else {
		params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
		params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
	}
	params.buffer.phy_addr = priv->dmatp.addr;
	params.buffer.size = priv->dmatb.bytes;
	params.buffer.pages = priv->dma_trace_pages;
	params.stream_tag = 0;

	priv->host_offset = 0;
	priv->dtrace_draining = false;

	ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
		return ret;
	}
	dev_dbg(sdev->dev, "%s: stream_tag: %d\n", __func__, params.stream_tag);

	/* send IPC to the DSP */
	ret = sof_ipc_tx_message(sdev->ipc, &params, sizeof(params), &ipc_reply, sizeof(ipc_reply));
	if (ret < 0) {
		dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
		goto trace_release;
	}

start:
	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
		goto trace_release;
	}

	priv->dtrace_state = SOF_DTRACE_ENABLED;

	return 0;

trace_release:
	sof_dtrace_host_release(sdev);
	return ret;
}
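
/*
 * One-time trace setup: allocate the DMA page table and SG trace buffer,
 * build the compressed page table shared with the firmware, create the
 * debugfs entries on first boot, then start tracing via ipc3_dtrace_enable().
 */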
static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv;
	int ret;

	/* dtrace is only supported with SOF_IPC */
	if (sdev->pdata->ipc_type != SOF_IPC)
		return -EOPNOTSUPP;

	if (sdev->fw_trace_data) {
		dev_err(sdev->dev, "fw_trace_data has been already allocated\n");
		return -EBUSY;
	}

	priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sdev->fw_trace_data = priv;

	/* set to disabled before starting initialization */
	priv->dtrace_state = SOF_DTRACE_DISABLED;

	/* allocate trace page table buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
				  PAGE_SIZE, &priv->dmatp);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
		return ret;
	}

	/* allocate trace data buffer */
	ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
				      DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
				      &priv->dmatb);
	if (ret < 0) {
		dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
		goto page_err;
	}

	/* create compressed page table for audio firmware */
	ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
					priv->dmatp.area, priv->dmatb.bytes);
	if (ret < 0)
		goto table_err;

	priv->dma_trace_pages = ret;
	dev_dbg(sdev->dev, "%s: dma_trace_pages: %d\n", __func__,
		priv->dma_trace_pages);

	if (sdev->first_boot) {
		ret = debugfs_create_dtrace(sdev);
		if (ret < 0)
			goto table_err;
	}

	init_waitqueue_head(&priv->trace_sleep);

	ret = ipc3_dtrace_enable(sdev);
	if (ret < 0)
		goto table_err;

	return 0;
table_err:
	priv->dma_trace_pages = 0;
	snd_dma_free_pages(&priv->dmatb);
page_err:
	snd_dma_free_pages(&priv->dmatp);
	return ret;
}
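
/*
 * Called from the IPC RX path when the firmware reports a new DMA trace
 * position; wakes up any reader blocked in sof_wait_dtrace_avail().
 */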
int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
			    struct sof_ipc_dma_trace_posn *posn)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (!sdev->fw_trace_is_supported)
		return 0;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED &&
	    priv->host_offset != posn->host_offset) {
		priv->host_offset = posn->host_offset;
		wake_up(&priv->trace_sleep);
	}

	if (posn->overflow != 0)
		dev_err(sdev->dev,
			"DSP trace buffer overflow %u bytes. Total messages %d\n",
			posn->overflow, posn->messages);

	return 0;
}

/* an error has occurred within the DSP that prevents further trace */
static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
		priv->dtrace_error = true;
		wake_up(&priv->trace_sleep);
	}
}

static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;
	struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
	struct sof_ipc_fw_version *v = &ready->version;
	struct sof_ipc_cmd_hdr hdr;
	struct sof_ipc_reply ipc_reply;
	int ret;

	if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
		return;

	ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
	priv->dtrace_state = SOF_DTRACE_STOPPED;

	/*
	 * stop and free trace DMA in the DSP. TRACE_DMA_FREE is only supported from
	 * ABI 3.20.0 onwards
	 */
	if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
		hdr.size = sizeof(hdr);
		hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;

		ret = sof_ipc_tx_message(sdev->ipc, &hdr, hdr.size,
					 &ipc_reply, sizeof(ipc_reply));
		if (ret < 0)
			dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
	}

	if (only_stop)
		goto out;

	ret = sof_dtrace_host_release(sdev);
	if (ret < 0)
		dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);

	priv->dtrace_state = SOF_DTRACE_DISABLED;

out:
	priv->dtrace_draining = true;
	wake_up(&priv->trace_sleep);
}

static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
{
	ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
}

static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
{
	return ipc3_dtrace_enable(sdev);
}

static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
{
	struct sof_dtrace_priv *priv = sdev->fw_trace_data;

	/* release trace */
	ipc3_dtrace_release(sdev, false);

	if (priv->dma_trace_pages) {
		snd_dma_free_pages(&priv->dmatb);
		snd_dma_free_pages(&priv->dmatp);
		priv->dma_trace_pages = 0;
	}
}

const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
	.init = ipc3_dtrace_init,
	.free = ipc3_dtrace_free,
	.fw_crashed = ipc3_dtrace_fw_crashed,
	.suspend = ipc3_dtrace_suspend,
	.resume = ipc3_dtrace_resume,
};