// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//
// Generic debug routines used to export DSP MMIO and memories to userspace
// for firmware debugging.
//

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include "sof-priv.h"
#include "ops.h"

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
#define MAX_IPC_FLOOD_DURATION_MS 1000
#define MAX_IPC_FLOOD_COUNT 10000
#define IPC_FLOOD_TEST_RESULT_LEN 512

static int sof_debug_ipc_flood_test(struct snd_sof_dev *sdev,
				    struct snd_sof_dfsentry *dfse,
				    bool flood_duration_test,
				    unsigned long ipc_duration_ms,
				    unsigned long ipc_count)
{
	struct sof_ipc_cmd_hdr hdr;
	struct sof_ipc_reply reply;
	u64 min_response_time = U64_MAX;
	ktime_t start, end, test_end;
	u64 avg_response_time = 0;
	u64 max_response_time = 0;
	u64 ipc_response_time;
	int i = 0;
	int ret;

	/* configure test IPC */
	hdr.cmd = SOF_IPC_GLB_TEST_MSG | SOF_IPC_TEST_IPC_FLOOD;
	hdr.size = sizeof(hdr);

	/* set test end time for duration flood test */
	if (flood_duration_test)
		test_end = ktime_get_ns() + ipc_duration_ms * NSEC_PER_MSEC;

	/* send test IPCs */
	while (1) {
		start = ktime_get();
		ret = sof_ipc_tx_message(sdev->ipc, hdr.cmd, &hdr, hdr.size,
					 &reply, sizeof(reply));
		end = ktime_get();

		if (ret < 0)
			break;

		/* compute min and max response times */
		ipc_response_time = ktime_to_ns(ktime_sub(end, start));
		min_response_time = min(min_response_time, ipc_response_time);
		max_response_time = max(max_response_time, ipc_response_time);

		/* sum up response times */
		avg_response_time += ipc_response_time;
		i++;

		/* test complete? */
		if (flood_duration_test) {
			if (ktime_to_ns(end) >= test_end)
				break;
		} else {
			if (i == ipc_count)
				break;
		}
	}

	if (ret < 0)
		dev_err(sdev->dev,
			"error: ipc flood test failed at %d iterations\n", i);

	/* return if the first IPC fails */
	if (!i)
		return ret;

	/* compute average response time */
	do_div(avg_response_time, i);

	/* clear previous test output */
	memset(dfse->cache_buf, 0, IPC_FLOOD_TEST_RESULT_LEN);

	if (flood_duration_test) {
		dev_dbg(sdev->dev, "IPC Flood test duration: %lums\n",
			ipc_duration_ms);
		snprintf(dfse->cache_buf, IPC_FLOOD_TEST_RESULT_LEN,
			 "IPC Flood test duration: %lums\n", ipc_duration_ms);
	}

	dev_dbg(sdev->dev,
		"IPC Flood count: %d, Avg response time: %lluns\n",
		i, avg_response_time);
	dev_dbg(sdev->dev, "Max response time: %lluns\n",
		max_response_time);
	dev_dbg(sdev->dev, "Min response time: %lluns\n",
		min_response_time);

	/* format output string */
	snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
		 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
		 "IPC Flood count: %d\nAvg response time: %lluns\n",
		 i, avg_response_time);

	snprintf(dfse->cache_buf + strlen(dfse->cache_buf),
		 IPC_FLOOD_TEST_RESULT_LEN - strlen(dfse->cache_buf),
		 "Max response time: %lluns\nMin response time: %lluns\n",
		 max_response_time, min_response_time);

	return ret;
}
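
/*
 * Example userspace usage (illustrative only, not part of the original
 * sources; assumes debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1000 > /sys/kernel/debug/sof/ipc_flood_count
 *   cat /sys/kernel/debug/sof/ipc_flood_count
 *
 *   echo 500 > /sys/kernel/debug/sof/ipc_flood_duration_ms
 *   cat /sys/kernel/debug/sof/ipc_flood_duration_ms
 *
 * Writing a value triggers the flood test above; reading the same entry
 * back returns the formatted results cached in dfse->cache_buf.
 */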
#endif

static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *ppos)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	unsigned long ipc_duration_ms = 0;
	bool flood_duration_test = false;
	unsigned long ipc_count = 0;
	struct dentry *dentry;
	int err;
#endif
	size_t size;
	char *string;
	int ret;

	/* allocate an extra zeroed byte so kstrtoul() always sees a NUL terminator */
	string = kzalloc(count + 1, GFP_KERNEL);
	if (!string)
		return -ENOMEM;

	size = simple_write_to_buffer(string, count, ppos, buffer, count);
	ret = size;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/*
	 * The write op is currently only supported for the ipc_flood_count
	 * and ipc_flood_duration_ms debugfs entries.
	 * ipc_flood_count floods the DSP with the number of IPCs specified.
	 * ipc_flood_duration_ms floods the DSP for the duration (in ms)
	 * specified in the debugfs entry.
	 */
	dentry = file->f_path.dentry;
	if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
	    strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
		flood_duration_test = true;

	/* test completion criterion */
	if (flood_duration_test)
		ret = kstrtoul(string, 0, &ipc_duration_ms);
	else
		ret = kstrtoul(string, 0, &ipc_count);
	if (ret < 0)
		goto out;

	/* limit max duration/ipc count for flood test */
	if (flood_duration_test) {
		if (!ipc_duration_ms) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_duration_ms > MAX_IPC_FLOOD_DURATION_MS)
			ipc_duration_ms = MAX_IPC_FLOOD_DURATION_MS;
	} else {
		if (!ipc_count) {
			ret = size;
			goto out;
		}

		/* find the minimum. min() is not used to avoid warnings */
		if (ipc_count > MAX_IPC_FLOOD_COUNT)
			ipc_count = MAX_IPC_FLOOD_COUNT;
	}

	ret = pm_runtime_get_sync(sdev->dev);
	if (ret < 0) {
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to resume %d\n",
				    ret);
		pm_runtime_put_noidle(sdev->dev);
		goto out;
	}

	/* flood test */
	ret = sof_debug_ipc_flood_test(sdev, dfse, flood_duration_test,
				       ipc_duration_ms, ipc_count);

	pm_runtime_mark_last_busy(sdev->dev);
	err = pm_runtime_put_autosuspend(sdev->dev);
	if (err < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: debugfs write failed to idle %d\n",
				    err);

	/* return size if test is successful */
	if (ret >= 0)
		ret = size;
out:
#endif
	kfree(string);
	return ret;
}
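
/*
 * Worked example (added for clarity; not in the original source) of the
 * alignment handling in sof_dfsentry_read() below: with *ppos = 6 and
 * count = 8, the read start is rounded down to pos = 4, skip = 2, and the
 * intermediate buffer grows from ALIGN(8, 4) = 8 to 12 bytes when the
 * extra u32 still fits inside the window. Twelve bytes are then read
 * starting at offset 4 and the 8 requested bytes are returned to userspace
 * from buf + skip.
 */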
static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *ppos)
{
	struct snd_sof_dfsentry *dfse = file->private_data;
	struct snd_sof_dev *sdev = dfse->sdev;
	loff_t pos = *ppos;
	size_t size_ret;
	int skip = 0;
	int size;
	u8 *buf;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	struct dentry *dentry;

	dentry = file->f_path.dentry;
	if ((!strcmp(dentry->d_name.name, "ipc_flood_count") ||
	     !strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) &&
	    dfse->cache_buf) {
		if (*ppos)
			return 0;

		count = strlen(dfse->cache_buf);
		size_ret = copy_to_user(buffer, dfse->cache_buf, count);
		if (size_ret)
			return -EFAULT;

		*ppos += count;
		return count;
	}
#endif
	size = dfse->size;

	/* validate position & count */
	if (pos < 0)
		return -EINVAL;
	if (pos >= size || !count)
		return 0;
	/* find the minimum. min() is not used since it adds sparse warnings */
	if (count > size - pos)
		count = size - pos;

	/* align io read start to u32 multiple */
	pos = ALIGN_DOWN(pos, 4);

	/* intermediate buffer size must be u32 multiple */
	size = ALIGN(count, 4);

	/* if start position is unaligned, read extra u32 */
	if (unlikely(pos != *ppos)) {
		skip = *ppos - pos;
		if (pos + size + 4 < dfse->size)
			size += 4;
	}

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
		/*
		 * If the DSP is active: copy from IO.
		 * If the DSP is suspended:
		 *	- Copy from IO if the memory is always accessible.
		 *	- Otherwise, copy from cached buffer.
		 */
		if (pm_runtime_active(sdev->dev) ||
		    dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
			memcpy_fromio(buf, dfse->io_mem + pos, size);
		} else {
			dev_info(sdev->dev,
				 "Copying cached debugfs data\n");
			memcpy(buf, dfse->cache_buf + pos, size);
		}
#else
		/* if the DSP is in D3 */
		if (!pm_runtime_active(sdev->dev) &&
		    dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
			dev_err(sdev->dev,
				"error: debugfs entry cannot be read in DSP D3\n");
			kfree(buf);
			return -EINVAL;
		}

		memcpy_fromio(buf, dfse->io_mem + pos, size);
#endif
	} else {
		memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
	}

	/* copy to userspace */
	size_ret = copy_to_user(buffer, buf + skip, count);

	kfree(buf);

	/* update count & position if copy succeeded */
	if (size_ret)
		return -EFAULT;

	*ppos = pos + count;

	return count;
}

static const struct file_operations sof_dfs_fops = {
	.open = simple_open,
	.read = sof_dfsentry_read,
	.llseek = default_llseek,
	.write = sof_dfsentry_write,
};
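
/*
 * Illustrative use of snd_sof_debugfs_io_item() below (the bar index,
 * offset and size names are hypothetical placeholders, not defined in
 * this file): a platform driver could expose its DSP mailbox window as a
 * read-only /sys/kernel/debug/sof/mbox entry with
 *
 *   snd_sof_debugfs_io_item(sdev, sdev->bar[SOF_DSP_BAR] + MBOX_OFFSET,
 *			     MBOX_SIZE, "mbox", SOF_DEBUGFS_ACCESS_D0_ONLY);
 */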
/* create FS entry for debug files that can expose DSP memories, registers */
int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
			    void __iomem *base, size_t size,
			    const char *name,
			    enum sof_debugfs_access_type access_type)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
	dfse->io_mem = base;
	dfse->size = size;
	dfse->sdev = sdev;
	dfse->access_type = access_type;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
	/*
	 * allocate cache buffer that will be used to save the mem window
	 * contents prior to suspend
	 */
	if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
		dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
		if (!dfse->cache_buf)
			return -ENOMEM;
	}
#endif

	debugfs_create_file(name, 0444, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_io_item);

/* create FS entry for debug files to expose kernel memory */
int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
			     void *base, size_t size,
			     const char *name, mode_t mode)
{
	struct snd_sof_dfsentry *dfse;

	if (!sdev)
		return -EINVAL;

	dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
	if (!dfse)
		return -ENOMEM;

	dfse->type = SOF_DFSENTRY_TYPE_BUF;
	dfse->buf = base;
	dfse->size = size;
	dfse->sdev = sdev;

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/*
	 * cache_buf is unused for SOF_DFSENTRY_TYPE_BUF debugfs entries.
	 * So, use it to save the results of the last IPC flood test.
	 */
	dfse->cache_buf = devm_kzalloc(sdev->dev, IPC_FLOOD_TEST_RESULT_LEN,
				       GFP_KERNEL);
	if (!dfse->cache_buf)
		return -ENOMEM;
#endif

	debugfs_create_file(name, mode, sdev->debugfs_root, dfse,
			    &sof_dfs_fops);

	/* add to dfsentry list */
	list_add(&dfse->list, &sdev->dfsentry_list);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);
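
/*
 * snd_sof_dbg_init() below builds the debugfs tree. Illustratively (the
 * MMIO entry names come from the platform's debug_map, so they vary per
 * platform), the result looks like:
 *
 *   /sys/kernel/debug/sof/
 *   |-- <debug_map entries, e.g. DSP memory windows>	(0444)
 *   |-- ipc_flood_count				(0666)
 *   `-- ipc_flood_duration_ms				(0666)
 *
 * The two ipc_flood_* entries are only created when
 * CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST is enabled.
 */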
int snd_sof_dbg_init(struct snd_sof_dev *sdev)
{
	const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
	const struct snd_sof_debugfs_map *map;
	int i;
	int err;

	/* use "sof" as top level debugFS dir */
	sdev->debugfs_root = debugfs_create_dir("sof", NULL);

	/* init dfsentry list */
	INIT_LIST_HEAD(&sdev->dfsentry_list);

	/* create debugFS files for platform specific MMIO/DSP memories */
	for (i = 0; i < ops->debug_map_count; i++) {
		map = &ops->debug_map[i];

		err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
					      map->offset, map->size,
					      map->name, map->access_type);
		/* errors are only due to memory allocation, not debugfs */
		if (err < 0)
			return err;
	}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
	/* create read-write ipc_flood_count debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_count", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;

	/* create read-write ipc_flood_duration_ms debugfs entry */
	err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
				       "ipc_flood_duration_ms", 0666);

	/* errors are only due to memory allocation, not debugfs */
	if (err < 0)
		return err;
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(snd_sof_dbg_init);

void snd_sof_free_debug(struct snd_sof_dev *sdev)
{
	debugfs_remove_recursive(sdev->debugfs_root);
}
EXPORT_SYMBOL_GPL(snd_sof_free_debug);

void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
{
	if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
	    (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
		/* should we prevent DSP entering D3 ? */
		dev_info(sdev->dev, "info: preventing DSP entering D3 state to preserve context\n");
		pm_runtime_get_noresume(sdev->dev);
	}

	/* dump vital information to the logs */
	snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
	snd_sof_ipc_dump(sdev);
	snd_sof_trace_notify_for_error(sdev);
}
EXPORT_SYMBOL(snd_sof_handle_fw_exception);