// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Program Flow Trace driver
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/perf_event.h>
#include <asm/sections.h>

#include "coresight-etm.h"
#include "coresight-etm-perf.h"
#include "coresight-trace-id.h"

/*
 * Not really modular but using module_param is the easiest way to
 * remain consistent with existing use cases for now.
 */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

static struct etm_drvdata *etmdrvdata[NR_CPUS];

static enum cpuhp_state hp_online;

/*
 * Memory mapped writes to clear os lock are not supported on some processors
 * and OS lock must be unlocked before any memory mapped access on such
 * processors, otherwise memory mapped reads/writes will be invalid.
 */
static void etm_os_unlock(struct etm_drvdata *drvdata)
{
	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	drvdata->os_unlock = true;
	isb();
}

static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
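/*
 * Note on register access: etm_readl()/etm_writel() (see coresight-etm.h)
 * are expected to dispatch to either CP14 coprocessor accessors or
 * memory-mapped I/O on drvdata->base, depending on drvdata->use_cp14.
 * Everything below is written against that abstraction, which is why the
 * timeout helper that follows cannot simply reuse coresight_timeout().
 */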
/**
 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 * @drvdata: etm's private data structure.
 * @offset: offset of the register of interest within the ETM register space.
 * @position: the position of the bit of interest.
 * @value: the value the bit should have.
 *
 * Basically the same as @coresight_timeout except for the register access
 * method where we have to account for CP14 configurations.
 *
 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
				 int position, int value)
{
	int i;
	u32 val;

	for (i = TIMEOUT_US; i > 0; i--) {
		val = etm_readl(drvdata, offset);
		/* Waiting on the bit to go from 0 to 1 */
		if (value) {
			if (val & BIT(position))
				return 0;
		/* Waiting on the bit to go from 1 to 0 */
		} else {
			if (!(val & BIT(position)))
				return 0;
		}

		/*
		 * Delay is arbitrary - the specification doesn't say how long
		 * we are expected to wait. Extra check required to make sure
		 * we don't wait needlessly on the last iteration.
		 */
		if (i - 1)
			udelay(1);
	}

	return -EAGAIN;
}

static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(&drvdata->csdev->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}

static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(&drvdata->csdev->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}
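/*
 * Programming model, as used by the enable/disable paths below: set the
 * ETMCR programming bit (etm_set_prog()) and wait for ETMSR to confirm it
 * before touching configuration registers, then clear it (etm_clr_prog())
 * to let tracing resume. See etm_enable_hw()/etm_disable_hw() for the two
 * call sites.
 */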
void etm_set_default(struct etm_config *config)
{
	int i;

	if (WARN_ON_ONCE(!config))
		return;

	/*
	 * Taken verbatim from the TRM:
	 *
	 * To trace all memory:
	 *  set bit [24] in register 0x009, the ETMTECR1, to 1
	 *  set all other bits in register 0x009, the ETMTECR1, to 0
	 *  set all bits in register 0x007, the ETMTECR2, to 0
	 *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
	 */
	config->enable_ctrl1 = ETMTECR1_INC_EXC;
	config->enable_ctrl2 = 0x0;
	config->enable_event = ETM_HARD_WIRE_RES_A;

	config->trigger_event = ETM_DEFAULT_EVENT_VAL;

	config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
	config->timestamp_event = ETM_DEFAULT_EVENT_VAL;

	for (i = 0; i < ETM_MAX_CNTR; i++) {
		config->cntr_rld_val[i] = 0x0;
		config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
		config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
		config->cntr_val[i] = 0x0;
	}

	config->seq_curr_state = 0x0;
	config->ctxid_idx = 0x0;
	for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask = 0x0;
	/* Setting default to 1024 as per TRM recommendation */
	config->sync_freq = 0x400;
}

void etm_config_trace_mode(struct etm_config *config)
{
	u32 flags, mode;

	mode = config->mode;

	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		return;

	/* nothing to do if neither flag is set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
		return;

	flags = (1 << 0 |	/* instruction execute */
		 3 << 3 |	/* ARM instruction */
		 0 << 5 |	/* No data value comparison */
		 0 << 7 |	/* No exact match */
		 0 << 8);	/* Ignore context ID */

	/* No need to worry about single address comparators. */
	config->enable_ctrl2 = 0x0;

	/* Bit 0 is address range comparator 1 */
	config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;

	/*
	 * On ETMv3.5:
	 * ETMACTRn[13,11] == Non-secure state comparison control
	 * ETMACTRn[12,10] == Secure state comparison control
	 *
	 * b00 == Match in all modes in this state
	 * b01 == Do not match in any mode in this state
	 * b10 == Match in all modes except user mode in this state
	 * b11 == Match only in user mode in this state
	 */

	/* Tracing in secure mode is not supported at this time */
	flags |= (0 << 12 | 1 << 10);

	if (mode & ETM_MODE_EXCL_USER) {
		/* exclude user, match all modes except user mode */
		flags |= (1 << 13 | 0 << 11);
	} else {
		/* exclude kernel, match only in user mode */
		flags |= (1 << 13 | 1 << 11);
	}

	/*
	 * The ETMEEVR register is already set to "hard wire A". As such
	 * all there is to do is setup an address comparator that spans
	 * the entire address range and configure the state and mode bits.
	 */
	config->addr_val[0] = (u32) 0x0;
	config->addr_val[1] = (u32) ~0x0;
	config->addr_acctype[0] = flags;
	config->addr_acctype[1] = flags;
	config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
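/*
 * Worked example of the access-type flags above (illustrative only): the
 * base flags come to 0x19 (instruction execute, ARM instruction), plus
 * 0x400 for the secure-state control. Excluding user space then yields
 * ETMACTRn == 0x2419 (b10, match all modes except user), while excluding
 * the kernel yields ETMACTRn == 0x2c19 (b11, match in user mode only).
 * Both comparators carry the same flags over the 0x0-0xffffffff range.
 */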
#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
				 ETMCR_TIMESTAMP_EN | \
				 ETMCR_RETURN_STACK)

static int etm_parse_event_config(struct etm_drvdata *drvdata,
				  struct perf_event *event)
{
	struct etm_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;

	if (!attr)
		return -EINVAL;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etm_config));

	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm_set_default(config);

	/*
	 * By default the tracers are configured to trace the whole address
	 * range.  Narrow the field only if requested by user space.
	 */
	if (config->mode)
		etm_config_trace_mode(config);

	/*
	 * At this time only cycle accurate, return stack and timestamp
	 * options are available.
	 */
	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
		return -EINVAL;

	config->ctrl = attr->config;

	/* Don't trace contextID when running in a non-root PID namespace */
	if (!task_is_in_init_pid_ns(current))
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	/*
	 * Possible to have cores with PTM (supports ret stack) and ETM
	 * (never has ret stack) on the same SoC. So if we have a request
	 * for return stack that can't be honoured on this core then
	 * clear the bit - trace will still continue normally
	 */
	if ((config->ctrl & ETMCR_RETURN_STACK) &&
	    !(drvdata->etmccer & ETMCCER_RETSTACK))
		config->ctrl &= ~ETMCR_RETURN_STACK;

	return 0;
}

static int etm_enable_hw(struct etm_drvdata *drvdata)
{
	int i, rc;
	u32 etmcr;
	struct etm_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		goto done;

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	/* Clear setting from a previous run if need be */
	etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
	etmcr |= drvdata->port_size;
	etmcr |= ETMCR_ETM_EN;
	etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, config->enable_event, ETMTEEVR);
	etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, config->fifofull_level, ETMFFLR);
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
	}
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, config->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
	}
	etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, config->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	etm_clr_prog(drvdata);

done:
	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}
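/*
 * etm_enable_hw() must execute on the CPU whose ETM is being programmed,
 * and smp_call_function_single() callbacks return void. This small wrapper
 * carries the return code back to the caller of the cross-call (see
 * etm_enable_sysfs() below).
 */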
struct etm_enable_arg {
	struct etm_drvdata *drvdata;
	int rc;
};

static void etm_enable_hw_smp_call(void *info)
{
	struct etm_enable_arg *arg = info;

	if (WARN_ON(!arg))
		return;
	arg->rc = etm_enable_hw(arg->drvdata);
}

static int etm_cpu_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return drvdata->cpu;
}

int etm_read_alloc_trace_id(struct etm_drvdata *drvdata)
{
	int trace_id;

	/*
	 * This will allocate a trace ID to the cpu,
	 * or return the one currently allocated.
	 *
	 * trace id function has its own lock
	 */
	trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
	if (IS_VALID_CS_TRACE_ID(trace_id))
		drvdata->traceid = (u8)trace_id;
	else
		dev_err(&drvdata->csdev->dev,
			"Failed to allocate trace ID for %s on CPU%d\n",
			dev_name(&drvdata->csdev->dev), drvdata->cpu);
	return trace_id;
}

void etm_release_trace_id(struct etm_drvdata *drvdata)
{
	coresight_trace_id_put_cpu_id(drvdata->cpu);
}
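/*
 * Trace ID handling differs by mode: the sysfs path allocates an ID via
 * etm_read_alloc_trace_id() and releases it when the session is torn down,
 * while the perf path below only reads the ID that perf already allocated
 * in _setup_aux() and relies on perf to release it in _free_aux().
 */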
static int etm_enable_perf(struct coresight_device *csdev,
			   struct perf_event *event,
			   struct coresight_trace_id_map *id_map)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int trace_id;

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return -EINVAL;

	/* Configure the tracer based on the session's specifics */
	etm_parse_event_config(drvdata, event);

	/*
	 * perf allocates cpu ids as part of _setup_aux() - device needs to use
	 * the allocated ID. This reads the current version without allocation.
	 *
	 * This does not use the trace id lock to prevent lockdep issues
	 * with perf locks - we know the ID cannot change until perf shuts down
	 * the session
	 */
	trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
	if (!IS_VALID_CS_TRACE_ID(trace_id)) {
		dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
			dev_name(&drvdata->csdev->dev), drvdata->cpu);
		return -EINVAL;
	}
	drvdata->traceid = (u8)trace_id;

	/* And enable it */
	return etm_enable_hw(drvdata);
}

static int etm_enable_sysfs(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm_enable_arg arg = { };
	int ret;

	spin_lock(&drvdata->spinlock);

	/* sysfs needs to allocate and set a trace ID */
	ret = etm_read_alloc_trace_id(drvdata);
	if (ret < 0)
		goto unlock_enable_sysfs;

	/*
	 * Configure the ETM only if the CPU is online. If it isn't online
	 * hw configuration will take place on the local CPU during bring up.
	 */
	if (cpu_online(drvdata->cpu)) {
		arg.drvdata = drvdata;
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw_smp_call, &arg, 1);
		if (!ret)
			ret = arg.rc;
		if (!ret)
			drvdata->sticky_enable = true;
	} else {
		ret = -ENODEV;
	}

	if (ret)
		etm_release_trace_id(drvdata);

unlock_enable_sysfs:
	spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
	return ret;
}

static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
		      enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
	int ret;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!coresight_take_mode(csdev, mode)) {
		/* Someone is already using the tracer */
		return -EBUSY;
	}

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etm_enable_perf(csdev, event, id_map);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);

	return ret;
}

static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;
	struct etm_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Read back sequencer and counters for post trace analysis */
	config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(csdev);

	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev,
		"cpu: %d disable smp call done\n", drvdata->cpu);
}
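/*
 * Two disable paths follow: the perf variant runs on the CPU that owns the
 * tracer (enforced by the WARN_ON_ONCE below) and skips the state
 * read-back, whereas the sysfs variant cross-calls etm_disable_hw() so the
 * sequencer and counter values remain available through sysfs after the
 * session ends.
 */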
static void etm_disable_perf(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return;

	CS_UNLOCK(drvdata->base);

	/* Setting the prog bit disables tracing immediately */
	etm_set_prog(drvdata);

	/*
	 * There is no way to know when the tracer will be used again so
	 * power down the tracer.
	 */
	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(csdev);

	CS_LOCK(drvdata->base);

	/*
	 * perf will release trace ids when _free_aux()
	 * is called at the end of the session
	 */
}

static void etm_disable_sysfs(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	cpus_read_lock();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);

	spin_unlock(&drvdata->spinlock);
	cpus_read_unlock();

	/*
	 * We only release trace IDs when resetting sysfs. This permits sysfs
	 * users to read the trace ID after the trace session has completed,
	 * maintaining operational behaviour with the prior trace id
	 * allocation method.
	 */

	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}

static void etm_disable(struct coresight_device *csdev,
			struct perf_event *event)
{
	enum cs_mode mode;

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status.  As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = coresight_get_mode(csdev);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm_disable_perf(csdev);
		break;
	default:
		WARN_ON_ONCE(mode);
		return;
	}

	if (mode)
		coresight_set_mode(csdev, CS_MODE_DISABLED);
}

static const struct coresight_ops_source etm_source_ops = {
	.cpu_id		= etm_cpu_id,
	.enable		= etm_enable,
	.disable	= etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops	= &etm_source_ops,
};

static int etm_online_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
		coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
	return 0;
}

static int etm_starting_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	/* etm_os_unlock() also sets drvdata->os_unlock for us */
	if (!etmdrvdata[cpu]->os_unlock)
		etm_os_unlock(etmdrvdata[cpu]);

	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm_enable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

static int etm_dying_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm_disable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

static bool etm_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V3_3:
	case ETM_ARCH_V3_5:
	case PFT_ARCH_V1_0:
	case PFT_ARCH_V1_1:
		return true;
	default:
		return false;
	}
}
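/*
 * etm_init_arch_data() runs once per device, on the device's own CPU, to
 * snapshot the implementation's capabilities. The BMVAL() extractions
 * below follow the ETMCCR layout: [3:0] address comparator pairs (hence
 * the *2), [15:13] counters, [19:17] external inputs, [22:20] external
 * outputs and [25:24] context ID comparators.
 */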
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear power down bit since when this bit is set writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set prog bit. It will be set from reset but this is included to
	 * ensure it is set
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}

static int __init etm_hp_setup(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
						   "arm/coresight:starting",
						   etm_starting_cpu, etm_dying_cpu);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "arm/coresight:online",
						   etm_online_cpu, NULL);

	/* HP dyn state ID returned in ret on success */
	if (ret > 0) {
		hp_online = ret;
		return 0;
	}

	/* failed dyn state - remove others */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);

	return ret;
}

static void etm_hp_clear(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
	if (hp_online) {
		cpuhp_remove_state_nocalls(hp_online);
		hp_online = 0;
	}
}
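/*
 * For reference, a device tree node matched by this driver typically looks
 * like the sketch below (illustrative values only; see the CoreSight
 * device tree bindings for the authoritative format):
 *
 *	etm@2201c000 {
 *		compatible = "arm,coresight-etm3x", "arm,primecell";
 *		reg = <0x2201c000 0x1000>;
 *		cpu = <&cpu0>;
 *		clocks = <&oscclk6a>;
 *		clock-names = "apb_pclk";
 *		out-ports { ... };
 *	};
 *
 * The optional "arm,cp14" property selects coprocessor accesses over MMIO,
 * as read in etm_probe() below.
 */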
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
	if (!desc.name)
		return -ENOMEM;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_arch_supported(drvdata->arch))
		return -EINVAL;

	etm_set_default(&drvdata->config);

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc.ops = &etm_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	etmdrvdata[drvdata->cpu] = drvdata;

	pm_runtime_put(&adev->dev);
	dev_info(&drvdata->csdev->dev,
		 "%s initialized\n", (char *)coresight_get_uci_data(id));
	if (boot_enable) {
		coresight_enable_sysfs(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;
}

static void clear_etmdrvdata(void *info)
{
	int cpu = *(int *)info;

	etmdrvdata[cpu] = NULL;
}

static void etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	etm_perf_symlink(drvdata->csdev, false);

	/*
	 * Taking hotplug lock here to avoid racing between etm_remove and
	 * CPU hotplug call backs.
	 */
	cpus_read_lock();
	/*
	 * The readers of etmdrvdata[] are CPU hotplug callbacks and PM
	 * notification callbacks. Changing etmdrvdata[i] on CPU i ensures
	 * that each callback sees a consistent view within a single
	 * invocation.
	 */
	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
		etmdrvdata[drvdata->cpu] = NULL;

	cpus_read_unlock();

	coresight_unregister(drvdata->csdev);
}

#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};

static const struct amba_id etm_ids[] = {
	/* ETM 3.3 */
	CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
	/* ETM 3.5 - Cortex-A5 */
	CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
	/* ETM 3.5 */
	CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
	/* PTM 1.0 */
	CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
	/* PTM 1.1 */
	CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
	/* PTM 1.1 Qualcomm */
	CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
	{ 0, 0, NULL },
};

MODULE_DEVICE_TABLE(amba, etm_ids);

static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.pm	= &etm_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etm_probe,
	.remove		= etm_remove,
	.id_table	= etm_ids,
};
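/*
 * Typical usage, for reference (paths and events are illustrative and
 * depend on the platform's CoreSight topology):
 *
 *   - sysfs: configure a sink, then
 *	echo 1 > /sys/bus/coresight/devices/etm0/enable_source
 *   - perf:
 *	perf record -e cs_etm//u -- <cmd>
 *   - boot: pass coresight-etm3x.boot_enable=1 on the kernel command line
 *	(or set the module parameter) to start tracing at probe time.
 */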
driver\n"); 1033 etm_hp_clear(); 1034 } 1035 1036 return ret; 1037 } 1038 1039 static void __exit etm_exit(void) 1040 { 1041 amba_driver_unregister(&etm_driver); 1042 etm_hp_clear(); 1043 } 1044 1045 module_init(etm_init); 1046 module_exit(etm_exit); 1047 1048 MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>"); 1049 MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>"); 1050 MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver"); 1051 MODULE_LICENSE("GPL v2"); 1052