// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/clk/clk-conf.h>

#include <asm/barrier.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/local.h>
#include <asm/virt.h>

#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
#include "coresight-etm4x-cfg.h"
#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"

static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");

#define PARAM_PM_SAVE_FIRMWARE	  0 /* save self-hosted state as per firmware */
#define PARAM_PM_SAVE_NEVER	  1 /* never save any state */
#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */

static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
module_param(pm_save_enable, int, 0444);
MODULE_PARM_DESC(pm_save_enable,
	"Save/restore state on power down: 1 = never, 2 = self-hosted");
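
/*
 * Illustrative usage of the parameters above (a sketch, not taken from this
 * file): when the driver is built in, they can be set on the kernel command
 * line; when built as a module, via modprobe. Assuming the object is named
 * coresight-etm4x, that would look like:
 *
 *	coresight_etm4x.boot_enable=1
 *	coresight_etm4x.pm_save_enable=2	(PARAM_PM_SAVE_SELF_HOSTED)
 *
 * The exact prefix depends on KBUILD_MODNAME for this object.
 */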

static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default_config(struct etmv4_config *config);
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event);
static u64 etm4_get_access_type(struct etmv4_config *config);

static enum cpuhp_state hp_online;

struct etm4_init_arg {
	struct device *dev;
	struct csdev_access *csa;
};

static DEFINE_PER_CPU(struct etm4_init_arg *, delayed_probe);
static int etm4_probe_cpu(unsigned int cpu);

/*
 * Check if TRCSSPCICRn(i) is implemented for a given instance.
 *
 * TRCSSPCICR<n> is present only if all of the following are true:
 *	TRCIDR4.NUMSSCC > n.
 *	TRCIDR4.NUMPC > 0b0000.
 *	TRCSSCSR<n>.PC == 0b1
 */
static bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
{
	return (n < drvdata->nr_ss_cmp) &&
	       drvdata->nr_pe &&
	       (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
}

u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
	u64 res = 0;

	switch (offset) {
	ETM4x_READ_SYSREG_CASES(res)
	default:
		pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
				    offset);
	}

	if (!_relaxed)
		__io_ar(res);	/* Imitate the !relaxed I/O helpers */

	return res;
}

void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
	if (!_relaxed)
		__io_bw();	/* Imitate the !relaxed I/O helpers */
	if (!_64bit)
		val &= GENMASK(31, 0);

	switch (offset) {
	ETM4x_WRITE_SYSREG_CASES(val)
	default:
		pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
				    offset);
	}
}

static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
	u64 res = 0;

	switch (offset) {
	ETE_READ_CASES(res)
	default:
		pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
				    offset);
	}

	if (!_relaxed)
		__io_ar(res);	/* Imitate the !relaxed I/O helpers */

	return res;
}

static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
	if (!_relaxed)
		__io_bw();	/* Imitate the !relaxed I/O helpers */
	if (!_64bit)
		val &= GENMASK(31, 0);

	switch (offset) {
	ETE_WRITE_CASES(val)
	default:
		pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
				    offset);
	}
}
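
/*
 * A sketch of what the generated switch cases above look like (illustrative
 * only; the real ETM4x_READ/WRITE_SYSREG_CASES() macros live in
 * coresight-etm4x.h):
 *
 *	case TRCPRGCTLR:
 *		res = read_sysreg_s(SYS_TRCPRGCTLR);
 *		break;
 *	...
 *
 * i.e. one case per architected trace register, translating the offset used
 * by the memory-mapped interface into an access to the equivalent system
 * register, so callers can use a single accessor for both access modes.
 */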

static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
			       struct csdev_access *csa)
{
	u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);

	drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
}

static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
			      struct csdev_access *csa, u32 val)
{
	val = !!val;

	switch (drvdata->os_lock_model) {
	case ETM_OSLOCK_PRESENT:
		etm4x_relaxed_write32(csa, val, TRCOSLAR);
		break;
	case ETM_OSLOCK_PE:
		write_sysreg_s(val, SYS_OSLAR_EL1);
		break;
	default:
		pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
			     smp_processor_id(), drvdata->os_lock_model);
		fallthrough;
	case ETM_OSLOCK_NI:
		return;
	}
	isb();
}

static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
			       struct csdev_access *csa)
{
	WARN_ON(drvdata->cpu != smp_processor_id());

	/* Writing 0 to the OS Lock unlocks the trace unit registers */
	etm_write_os_lock(drvdata, csa, 0x0);
	drvdata->os_unlock = true;
}

static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
	if (!WARN_ON(!drvdata->csdev))
		etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
}

static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
	if (WARN_ON(!drvdata->csdev))
		return;
	/* Writing 0x1 to the OS Lock locks the trace registers */
	etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
	drvdata->os_unlock = false;
}

static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
			 struct csdev_access *csa)
{
	/* Software Lock is only accessible via the memory-mapped interface */
	if (csa->io_mem)
		CS_LOCK(csa->base);
}

static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
			   struct csdev_access *csa)
{
	if (csa->io_mem)
		CS_UNLOCK(csa->base);
}

static int etm4_cpu_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return drvdata->cpu;
}

void etm4_release_trace_id(struct etmv4_drvdata *drvdata)
{
	coresight_trace_id_put_cpu_id(drvdata->cpu);
}

struct etm4_enable_arg {
	struct etmv4_drvdata *drvdata;
	int rc;
};

/*
 * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs.
 * When the CPU supports FEAT_TRF, we could move the ETM to a trace
 * prohibited state by filtering the Exception levels via TRFCR_EL1.
 */
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
	u64 trfcr;

	/* If the CPU doesn't support FEAT_TRF, nothing to do */
	if (!drvdata->trfcr)
		return;

	trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);

	write_trfcr(trfcr);
	kvm_tracing_set_el1_configuration(trfcr);
}

static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata)
{
	u64 trfcr = drvdata->trfcr;

	if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
		trfcr &= ~TRFCR_EL1_ExTRE;
	if (drvdata->config.mode & ETM_MODE_EXCL_USER)
		trfcr &= ~TRFCR_EL1_E0TRE;

	return trfcr;
}

/*
 * etm4x_allow_trace - Allow CPU tracing in the respective ELs,
 * as configured by the drvdata->config.mode for the current
 * session. Even though we have TRCVICTLR bits to filter the
 * trace in the ELs, it doesn't prevent the ETM from generating
 * a packet (e.g, TraceInfo) that might contain the addresses from
 * the excluded levels. Thus we use the additional controls provided
 * via the Trace Filtering controls (FEAT_TRF) to make sure no trace
 * is generated for the excluded ELs.
 */
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
	u64 trfcr, guest_trfcr;

	/* If the CPU doesn't support FEAT_TRF, nothing to do */
	if (!drvdata->trfcr)
		return;

	if (drvdata->config.mode & ETM_MODE_EXCL_HOST)
		trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
	else
		trfcr = etm4x_get_kern_user_filter(drvdata);

	write_trfcr(trfcr);

	/* Set filters for guests and pass to KVM */
	if (drvdata->config.mode & ETM_MODE_EXCL_GUEST)
		guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
	else
		guest_trfcr = etm4x_get_kern_user_filter(drvdata);

	/* TRFCR_EL1 doesn't have CX so mask it out. */
	guest_trfcr &= ~TRFCR_EL2_CX;
	kvm_tracing_set_el1_configuration(guest_trfcr);
}
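
/*
 * Worked example (derived from the code above): a perf session opened with
 * exclude_kernel set gives config.mode & ETM_MODE_EXCL_KERN, so the host
 * TRFCR value keeps E0TRE (EL0 trace enable) but clears ExTRE, i.e. EL0 is
 * traced while EL1/EL2 stay prohibited. With exclude_host set instead, both
 * enable bits are cleared for the host while the guest value passed to KVM
 * can still carry its own kernel/user filter.
 */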

#ifdef CONFIG_ETM4X_IMPDEF_FEATURE

#define HISI_HIP08_AMBA_ID		0x000b6d01
#define ETM4_AMBA_MASK			0xfffff
#define HISI_HIP08_CORE_COMMIT_MASK	0x3000
#define HISI_HIP08_CORE_COMMIT_SHIFT	12
#define HISI_HIP08_CORE_COMMIT_FULL	0b00
#define HISI_HIP08_CORE_COMMIT_LVL_1	0b01
#define HISI_HIP08_CORE_COMMIT_REG	sys_reg(3, 1, 15, 2, 5)

struct etm4_arch_features {
	void (*arch_callback)(bool enable);
};

static bool etm4_hisi_match_pid(unsigned int id)
{
	return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
}

static void etm4_hisi_config_core_commit(bool enable)
{
	u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
		    HISI_HIP08_CORE_COMMIT_FULL;
	u64 val;

	/*
	 * Bits 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
	 * to set the core-commit level: 2'b00 means the cpu is at full speed,
	 * while 2'b01, 2'b10 and 2'b11 reduce the pipeline speed, with 2'b01
	 * being level-1 speed (the minimum value). So bits 12 and 13 should
	 * be cleared together.
	 */
	val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
	val &= ~HISI_HIP08_CORE_COMMIT_MASK;
	val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
	write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
}

static struct etm4_arch_features etm4_features[] = {
	[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
		.arch_callback = etm4_hisi_config_core_commit,
	},
	{},
};

static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
{
	struct etm4_arch_features *ftr;
	int bit;

	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
		ftr = &etm4_features[bit];

		if (ftr->arch_callback)
			ftr->arch_callback(true);
	}
}

static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
{
	struct etm4_arch_features *ftr;
	int bit;

	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
		ftr = &etm4_features[bit];

		if (ftr->arch_callback)
			ftr->arch_callback(false);
	}
}

static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
				     struct csdev_access *csa)
{
	/*
	 * TRCPIDR* registers are not required for ETMs with system
	 * instructions. They must be identified by the MIDR+REVIDRs.
	 * Skip the TRCPIDR checks for now.
	 */
	if (!csa->io_mem)
		return;

	if (etm4_hisi_match_pid(coresight_get_pid(csa)))
		set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
}
#else
static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
				     struct csdev_access *csa)
{
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */

static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val)
{
	if (!csa->io_mem)
		isb();
}

/*
 * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system
 * instructions to access the trace unit, each access must be separated by a
 * synchronization barrier. See ARM IHI 0064H.b section "4.3.7 Synchronization
 * of register updates", for the system instructions section, in "Notes":
 *
 * "In particular, whenever disabling or enabling the trace unit, a poll of
 *  TRCSTATR needs explicit synchronization between each read of TRCSTATR"
 */
static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
{
	if (!csa->io_mem)
		return coresight_timeout_action(csa, TRCSTATR, pos, val,
						etm4x_sys_ins_barrier);
	return coresight_timeout(csa, TRCSTATR, pos, val);
}

static int etm4_enable_trace_unit(struct etmv4_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct device *etm_dev = &csdev->dev;
	struct csdev_access *csa = &csdev->access;

	/*
	 * ETE mandates that the TRCRSR is written to before
	 * enabling it.
	 */
	if (etm4x_is_ete(drvdata))
		etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);

	etm4x_allow_trace(drvdata);
	/* Enable the trace unit */
	etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);

	/* Synchronize the register updates for sysreg access */
	if (!csa->io_mem)
		isb();

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) {
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");
		return -ETIME;
	}

	/*
	 * As recommended by section 4.3.7 ("Synchronization when using the
	 * memory-mapped interface") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

	return 0;
}
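
/*
 * Note on the polarity of TRCSTATR.IDLE in this file: IDLE == 1 means the
 * trace unit is idle (disabled or quiescent), so enabling waits for it to
 * drop to 0 above, while the programming path in etm4_enable_hw() below
 * first waits for it to rise to 1 after clearing TRCPRGCTLR.EN.
 */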

static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
	int i, rc;
	struct etmv4_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;
	struct device *etm_dev = &csdev->dev;
	struct csdev_access *csa = &csdev->access;

	etm4_cs_unlock(drvdata, csa);
	etm4_enable_arch_specific(drvdata);

	etm4_os_unlock(drvdata);

	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		goto done;

	/* Disable the trace unit before programming trace registers */
	etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);

	/*
	 * If we use system instructions, we need to synchronize the
	 * write to the TRCPRGCTLR, before accessing the TRCSTATR.
	 * See ARM IHI 0064F, section
	 * "4.3.7 Synchronization of register updates"
	 */
	if (!csa->io_mem)
		isb();

	/* wait for TRCSTATR.IDLE to go up */
	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");
	if (drvdata->nr_pe)
		etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
	etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
	/* nothing specific implemented */
	etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
	etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
	etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
	etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
	etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
	etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
	etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
	etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
	etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
	etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
	etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
	if (drvdata->nrseqstate) {
		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
	}
	if (drvdata->numextinsel)
		etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
		etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
		etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
	}

	/*
	 * Resource selector pair 0 is always implemented and reserved. As
	 * such start at 2.
	 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		/* always clear status bit on restart if using single-shot */
		if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
			config->ss_status[i] &= ~TRCSSCSRn_STATUS;
		etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
		etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
	}
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
	etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
	etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);

	if (!drvdata->skip_power_up) {
		u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);

		/*
		 * Request to keep the trace unit powered and also
		 * emulation of powerdown
		 */
		etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
	}

	if (!drvdata->paused)
		rc = etm4_enable_trace_unit(drvdata);
done:
	etm4_cs_lock(drvdata, csa);

	dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}

static void etm4_enable_hw_smp_call(void *info)
{
	struct etm4_enable_arg *arg = info;

	if (WARN_ON(!arg))
		return;
	arg->rc = etm4_enable_hw(arg->drvdata);
}

/*
 * The goal of function etm4_config_timestamp_event() is to configure a
 * counter that will tell the tracer to emit a timestamp packet when it
 * reaches zero. This is done in order to get a more fine grained idea
 * of when instructions are executed so that they can be correlated
 * with execution on other CPUs.
 *
 * To do this the counter itself is configured to self reload and
 * TRCRSCTLR1 (always true) used to get the counter to decrement. From
 * there a resource selector is configured with the counter and the
 * timestamp control register to use the resource selector to trigger the
 * event that will insert a timestamp packet in the stream.
 */
static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
{
	int ctridx, ret = -EINVAL;
	int counter, rselector;
	u32 val = 0;
	struct etmv4_config *config = &drvdata->config;

	/* No point in trying if we don't have at least one counter */
	if (!drvdata->nr_cntr)
		goto out;

	/* Find a counter that hasn't been initialised */
	for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
		if (config->cntr_val[ctridx] == 0)
			break;

	/* All the counters have been configured already, bail out */
	if (ctridx == drvdata->nr_cntr) {
		pr_debug("%s: no available counter found\n", __func__);
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * Searching for an available resource selector to use, starting at
	 * '2' since every implementation has at least 2 resource selectors.
	 * ETMIDR4 gives the number of resource selector _pairs_,
	 * hence multiply by 2.
	 */
	for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
		if (!config->res_ctrl[rselector])
			break;

	if (rselector == drvdata->nr_resource * 2) {
		pr_debug("%s: no available resource selector found\n",
			 __func__);
		ret = -ENOSPC;
		goto out;
	}

	/* Remember what counter we used */
	counter = 1 << ctridx;

	/*
	 * Initialise original and reload counter value to the smallest
	 * possible value in order to get as much precision as we can.
	 */
	config->cntr_val[ctridx] = 1;
	config->cntrldvr[ctridx] = 1;

	/* Set the trace counter control register */
	val =  0x1 << 16 |	/* Bit 16, reload counter automatically */
	       0x0 << 7 |	/* Select single resource selector */
	       0x1;		/* Resource selector 1, i.e always true */

	config->cntr_ctrl[ctridx] = val;

	val = 0x2 << 16 |	/* Group 0b0010 - Counter and sequencers */
	      counter << 0;	/* Counter to use */

	config->res_ctrl[rselector] = val;

	val = 0x0 << 7 |	/* Select single resource selector */
	      rselector;	/* Resource selector */

	config->ts_ctrl = val;

	ret = 0;
out:
	return ret;
}
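
/*
 * Worked example of the values computed above (illustrative): on an
 * implementation where counter 0 and resource selector 2 are free,
 * ctridx = 0 and rselector = 2, so:
 *
 *	cntr_ctrl[0]	= 0x1 << 16 | 0x1	= 0x10001
 *	res_ctrl[2]	= 0x2 << 16 | (1 << 0)	= 0x20001
 *	ts_ctrl		= 0x2
 *
 * i.e. counter 0 self-reloads off the always-true selector, resource
 * selector 2 fires when counter 0 reaches zero, and the timestamp event
 * is driven by selector 2.
 */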

static int etm4_parse_event_config(struct coresight_device *csdev,
				   struct perf_event *event)
{
	int ret = 0;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etmv4_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;
	unsigned long cfg_hash;
	int preset, cc_threshold;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etmv4_config));

	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	if (attr->exclude_host)
		config->mode |= ETM_MODE_EXCL_HOST;

	if (attr->exclude_guest)
		config->mode |= ETM_MODE_EXCL_GUEST;

	/* Always start from the default config */
	etm4_set_default_config(config);

	/* Configure filters specified on the perf cmd line, if any. */
	ret = etm4_set_event_filters(drvdata, event);
	if (ret)
		goto out;

	/* Go from generic option to ETMv4 specifics */
	if (attr->config & BIT(ETM_OPT_CYCACC)) {
		config->cfg |= TRCCONFIGR_CCI;
		/* TRM: Must program this for cycacc to work */
		cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK;
		if (!cc_threshold)
			cc_threshold = ETM_CYC_THRESHOLD_DEFAULT;
		if (cc_threshold < drvdata->ccitmin)
			cc_threshold = drvdata->ccitmin;
		config->ccctlr = cc_threshold;
	}
	if (attr->config & BIT(ETM_OPT_TS)) {
		/*
		 * Configure timestamps to be emitted at regular intervals in
		 * order to correlate instructions executed on different CPUs
		 * (CPU-wide trace scenarios).
		 */
		ret = etm4_config_timestamp_event(drvdata);

		/*
		 * No need to go further if timestamp intervals can't
		 * be configured.
		 */
		if (ret)
			goto out;

		/* bit[11], Global timestamp tracing bit */
		config->cfg |= TRCCONFIGR_TS;
	}

	/* Only trace contextID when running in the root PID namespace */
	if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
	    task_is_in_init_pid_ns(current))
		/* bit[6], Context ID tracing bit */
		config->cfg |= TRCCONFIGR_CID;

	/*
	 * Setting bit ETM_OPT_CTXTID2 in the perf config asks to trace VMID
	 * for recording CONTEXTIDR_EL2. Do not enable VMID tracing if the
	 * kernel is not running in EL2.
	 */
	if (attr->config & BIT(ETM_OPT_CTXTID2)) {
		if (!is_kernel_in_hyp_mode()) {
			ret = -EINVAL;
			goto out;
		}
		/* Only trace virtual contextID when running in the root PID namespace */
		if (task_is_in_init_pid_ns(current))
			config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
	}

	/* return stack - enable if selected and supported */
	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
		/* bit[12], Return stack enable bit */
		config->cfg |= TRCCONFIGR_RS;

	/*
	 * Set any selected configuration and preset.
	 *
	 * This extracts the values of PMU_FORMAT_ATTR(configid) and PMU_FORMAT_ATTR(preset)
	 * in the perf attributes defined in coresight-etm-perf.c.
	 * configid uses bits 63:32 of attr->config2, preset uses bits 3:0 of attr->config.
	 * A zero configid means no configuration active, preset = 0 means no preset selected.
	 */
	if (attr->config2 & GENMASK_ULL(63, 32)) {
		cfg_hash = (u32)(attr->config2 >> 32);
		preset = attr->config & 0xF;
		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
	}

	/* branch broadcast - enable if selected and supported */
	if (attr->config & BIT(ETM_OPT_BRANCH_BROADCAST)) {
		if (!drvdata->trcbb) {
			/*
			 * Missing BB support could cause silent decode errors
			 * so fail to open if it's not supported.
			 */
			ret = -EINVAL;
			goto out;
		} else {
			config->cfg |= BIT(ETM4_CFG_BIT_BB);
		}
	}

out:
	return ret;
}
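
/*
 * For illustration (a sketch, assuming the PMU format attributes defined in
 * coresight-etm-perf.c): the generic options above are typically selected
 * from the perf command line, e.g.
 *
 *	perf record -e cs_etm/cycacc,timestamp,branch_broadcast/u -- <cmd>
 *
 * which sets the corresponding ETM_OPT_* bits in attr->config before this
 * function runs.
 */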

static int etm4_enable_perf(struct coresight_device *csdev,
			    struct perf_event *event,
			    struct coresight_path *path)
{
	int ret = 0;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
		ret = -EINVAL;
		goto out;
	}

	/* Configure the tracer based on the session's specifics */
	ret = etm4_parse_event_config(csdev, event);
	if (ret)
		goto out;

	drvdata->trcid = path->trace_id;

	/* Populate pause state */
	drvdata->paused = !!READ_ONCE(event->hw.aux_paused);

	/* And enable it */
	ret = etm4_enable_hw(drvdata);

out:
	return ret;
}

static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm4_enable_arg arg = { };
	unsigned long cfg_hash;
	int ret, preset;

	/* enable any config activated by configfs */
	cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
	if (cfg_hash) {
		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
		if (ret)
			return ret;
	}

	raw_spin_lock(&drvdata->spinlock);

	drvdata->trcid = path->trace_id;

	/* Tracer will never be paused in sysfs mode */
	drvdata->paused = false;

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	arg.drvdata = drvdata;
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw_smp_call, &arg, 1);
	if (!ret)
		ret = arg.rc;
	if (!ret)
		drvdata->sticky_enable = true;

	if (ret)
		etm4_release_trace_id(drvdata);

	raw_spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
	return ret;
}

static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
		       enum cs_mode mode, struct coresight_path *path)
{
	int ret;

	if (!coresight_take_mode(csdev, mode)) {
		/* Someone is already using the tracer */
		return -EBUSY;
	}

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm4_enable_sysfs(csdev, path);
		break;
	case CS_MODE_PERF:
		ret = etm4_enable_perf(csdev, event, path);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		coresight_set_mode(csdev, CS_MODE_DISABLED);

	return ret;
}
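
/*
 * Illustrative sysfs usage for the CS_MODE_SYSFS path above (a sketch based
 * on the standard CoreSight sysfs interface, not defined in this file):
 *
 *	echo 1 > /sys/bus/coresight/devices/etm<N>/enable_source
 *
 * which ends up here via coresight_enable_sysfs() with mode == CS_MODE_SYSFS.
 */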

static void etm4_disable_trace_unit(struct etmv4_drvdata *drvdata)
{
	u32 control;
	struct coresight_device *csdev = drvdata->csdev;
	struct device *etm_dev = &csdev->dev;
	struct csdev_access *csa = &csdev->access;

	control = etm4x_relaxed_read32(csa, TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/*
	 * If the CPU supports v8.4 Trace filter Control,
	 * set the ETM to trace prohibited region.
	 */
	etm4x_prohibit_trace(drvdata);
	/*
	 * Make sure everything completes before disabling, as recommended
	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
	 * SSTATUS") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();
	/* Trace synchronization barrier, is a nop if not supported */
	tsb_csync();
	etm4x_relaxed_write32(csa, control, TRCPRGCTLR);

	/*
	 * As recommended by section 4.3.7 ("Synchronization when using system
	 * instructions to program the trace unit") of ARM IHI 0064H.b, the
	 * self-hosted trace analyzer must perform a Context synchronization
	 * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
	 */
	if (!csa->io_mem)
		isb();

	/* wait for TRCSTATR.PMSTABLE to go to '1' */
	if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for PM stable Trace Status\n");
	/*
	 * As recommended by section 4.3.7 (Synchronization of register updates)
	 * of ARM IHI 0064H.b.
	 */
	isb();
}

static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;
	struct etmv4_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	int i;

	etm4_cs_unlock(drvdata, csa);
	etm4_disable_arch_specific(drvdata);

	if (!drvdata->skip_power_up) {
		/* power can be removed from the trace unit now */
		control = etm4x_relaxed_read32(csa, TRCPDCR);
		control &= ~TRCPDCR_PU;
		etm4x_relaxed_write32(csa, control, TRCPDCR);
	}

	etm4_disable_trace_unit(drvdata);

	/* read the status of the single shot comparators */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_status[i] =
			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
	}

	/* read back the current counter values */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntr_val[i] =
			etm4x_relaxed_read32(csa, TRCCNTVRn(i));
	}

	coresight_disclaim_device_unlocked(csdev);
	etm4_cs_lock(drvdata, csa);

	dev_dbg(&drvdata->csdev->dev,
		"cpu: %d disable smp call done\n", drvdata->cpu);
}

static int etm4_disable_perf(struct coresight_device *csdev,
			     struct perf_event *event)
{
	u32 control;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_event_attr *attr = &event->attr;

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return -EINVAL;

	etm4_disable_hw(drvdata);
	/*
	 * The config_id occupies bits 63:32 of the config2 perf event attr
	 * field. If this is non-zero then we will have enabled a config.
	 */
	if (attr->config2 & GENMASK_ULL(63, 32))
		cscfg_csdev_disable_active_config(csdev);

	/*
	 * Check if the start/stop logic was active when the unit was stopped.
	 * That way we can re-enable the start/stop logic when the process is
	 * scheduled again. Configuration of the start/stop logic happens in
	 * function etm4_set_event_filters().
	 */
	control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
	/* TRCVICTLR::SSSTATUS, bit[9] */
	filters->ssstatus = (control & BIT(9));

	/*
	 * perf will release trace ids when _free_aux() is
	 * called at the end of the session.
	 */

	return 0;
}

static void etm4_disable_sysfs(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	cpus_read_lock();
	raw_spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);

	raw_spin_unlock(&drvdata->spinlock);

	cscfg_csdev_disable_active_config(csdev);

	cpus_read_unlock();

	/*
	 * We only release trace IDs when resetting sysfs.
	 * This permits sysfs users to read the trace ID after the trace
	 * session has completed, maintaining operational behaviour with
	 * the prior trace ID allocation method.
	 */

	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}

static void etm4_disable(struct coresight_device *csdev,
			 struct perf_event *event)
{
	enum cs_mode mode;

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status. As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = coresight_get_mode(csdev);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm4_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm4_disable_perf(csdev, event);
		break;
	}

	if (mode)
		coresight_set_mode(csdev, CS_MODE_DISABLED);
}

static int etm4_resume_perf(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct csdev_access *csa = &csdev->access;

	if (coresight_get_mode(csdev) != CS_MODE_PERF)
		return -EINVAL;

	etm4_cs_unlock(drvdata, csa);
	etm4_enable_trace_unit(drvdata);
	etm4_cs_lock(drvdata, csa);

	drvdata->paused = false;
	return 0;
}

static void etm4_pause_perf(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct csdev_access *csa = &csdev->access;

	if (coresight_get_mode(csdev) != CS_MODE_PERF)
		return;

	etm4_cs_unlock(drvdata, csa);
	etm4_disable_trace_unit(drvdata);
	etm4_cs_lock(drvdata, csa);

	drvdata->paused = true;
}
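
/*
 * Note on pause/resume (derived from the handlers above): pausing only
 * toggles the trace unit on and off via TRCPRGCTLR.EN; the full register
 * configuration programmed by etm4_enable_hw() is left in place, so a
 * subsequent resume just restarts tracing with the session's existing
 * settings.
 */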

static const struct coresight_ops_source etm4_source_ops = {
	.cpu_id		= etm4_cpu_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
	.resume_perf	= etm4_resume_perf,
	.pause_perf	= etm4_pause_perf,
};

static const struct coresight_ops etm4_cs_ops = {
	.trace_id	= coresight_etm_get_trace_id,
	.source_ops	= &etm4_source_ops,
};

static bool cpu_supports_sysreg_trace(void)
{
	u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);

	return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}

static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
				    struct csdev_access *csa)
{
	u32 devarch;

	if (!cpu_supports_sysreg_trace())
		return false;

	/*
	 * ETMs implementing sysreg access must implement TRCDEVARCH.
	 */
	devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
	switch (devarch & ETM_DEVARCH_ID_MASK) {
	case ETM_DEVARCH_ETMv4x_ARCH:
		*csa = (struct csdev_access) {
			.io_mem	= false,
			.read	= etm4x_sysreg_read,
			.write	= etm4x_sysreg_write,
		};
		break;
	case ETM_DEVARCH_ETE_ARCH:
		*csa = (struct csdev_access) {
			.io_mem	= false,
			.read	= ete_sysreg_read,
			.write	= ete_sysreg_write,
		};
		break;
	default:
		return false;
	}

	drvdata->arch = etm_devarch_to_arch(devarch);
	return true;
}

static bool is_devtype_cpu_trace(void __iomem *base)
{
	u32 devtype = readl(base + TRCDEVTYPE);

	return (devtype == CS_DEVTYPE_PE_TRACE);
}

static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
				   struct csdev_access *csa)
{
	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);

	if (!is_coresight_device(drvdata->base) || !is_devtype_cpu_trace(drvdata->base))
		return false;

	/*
	 * All ETMs must implement TRCDEVARCH to indicate that
	 * the component is an ETMv4. Even though TRCIDR1 also
	 * contains the information, it is part of the "Trace"
	 * register and must be accessed with the OSLK cleared,
	 * with MMIO. But we cannot touch the OSLK until we are
	 * sure this is an ETM. So rely only on the TRCDEVARCH.
	 */
	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
		return false;
	}

	drvdata->arch = etm_devarch_to_arch(devarch);
	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
	return true;
}

static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
				   struct csdev_access *csa)
{
	/*
	 * Always choose the memory mapped io, if there is
	 * a memory map to prevent sysreg access on broken
	 * systems.
	 */
	if (drvdata->base)
		return etm4_init_iomem_access(drvdata, csa);

	if (etm4_init_sysreg_access(drvdata, csa))
		return true;

	return false;
}

static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	u64 trfcr;

	drvdata->trfcr = 0;
	if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
		return;

	/*
	 * If the CPU supports v8.4 SelfHosted Tracing, enable
	 * tracing at the kernel EL and EL0, forcing to use the
	 * virtual time as the timestamp.
	 */
	trfcr = (FIELD_PREP(TRFCR_EL1_TS_MASK, TRFCR_EL1_TS_VIRTUAL) |
		 TRFCR_EL1_ExTRE |
		 TRFCR_EL1_E0TRE);

	/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
	if (is_kernel_in_hyp_mode())
		trfcr |= TRFCR_EL2_CX;

	drvdata->trfcr = trfcr;
}

/*
 * The following errata, on applicable cpu ranges, affect the CCITMIN field
 * in the TRCIDR3 register. A software read of the field returns 0x100,
 * limiting the cycle threshold granularity, whereas the correct value
 * should have been 0x4, which is well supported by the hardware.
 */
static struct midr_range etm_wrong_ccitmin_cpus[] = {
	/* Erratum #1490853 - Cortex-A76 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0),
	/* Erratum #1490853 - Neoverse-N1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0),
	/* Erratum #1491015 - Cortex-A77 */
	MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
	/* Erratum #1502854 - Cortex-X1 */
	MIDR_REV(MIDR_CORTEX_X1, 0, 0),
	/* Erratum #1619801 - Neoverse-V1 */
	MIDR_REV(MIDR_NEOVERSE_V1, 0, 0),
	{},
};

static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
{
	/*
	 * Erratum affected cpus will read 256 as the minimum
	 * instruction trace cycle counting threshold whereas
	 * the correct value should be 4 instead. Override the
	 * recorded value for 'drvdata->ccitmin' to workaround
	 * this problem.
	 */
	if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
		if (drvdata->ccitmin == 256)
			drvdata->ccitmin = 4;
	}
}
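
/*
 * Reading the table above (for illustration): MIDR_RANGE(MIDR_CORTEX_A76,
 * 0, 0, 4, 0) matches Cortex-A76 revisions r0p0 through r4p0 inclusive,
 * while MIDR_REV(MIDR_CORTEX_X1, 0, 0) matches only r0p0. On a matching
 * part, a reported CCITMIN of 0x100 (256) is overridden to 4.
 */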

static void etm4_init_arch_data(void *info)
{
	u32 etmidr0;
	u32 etmidr2;
	u32 etmidr3;
	u32 etmidr4;
	u32 etmidr5;
	struct etm4_init_arg *init_arg = info;
	struct etmv4_drvdata *drvdata;
	struct csdev_access *csa;
	struct device *dev = init_arg->dev;
	int i;

	drvdata = dev_get_drvdata(init_arg->dev);
	csa = init_arg->csa;

	/*
	 * If we are unable to detect the access mechanism,
	 * or unable to detect the trace unit type, fail
	 * early.
	 */
	if (!etm4_init_csdev_access(drvdata, csa))
		return;

	if (!csa->io_mem ||
	    fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
		drvdata->skip_power_up = true;

	/* Detect the support for OS Lock before we actually use it */
	etm_detect_os_lock(drvdata, csa);

	/* Make sure all registers are accessible */
	etm4_os_unlock_csa(drvdata, csa);
	etm4_cs_unlock(drvdata, csa);

	etm4_check_arch_features(drvdata, csa);

	/* find all capabilities of the tracing unit */
	etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
	/* TRCCCI, bit[7] Cycle counting instruction bit */
	drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
	/* RETSTACK, bit[9] Return stack bit */
	drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
	if (drvdata->q_support)
		drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);

	/* maximum size of resources */
	etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);

	etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
	drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
	etm4_fixup_wrong_ccitmin(drvdata);

	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
	drvdata->config.s_ex_level = drvdata->s_ex_level;
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
	drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
	/* STALLCTL, bit[26] is stall control implemented? */
	drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
	/* SYSSTALL, bit[27] implementation can support stall control? */
	drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
	/*
	 * NUMPROC - the number of PEs available for tracing, 5 bits
	 *	   = TRCIDR3.bits[13:12]bits[30:28]
	 *  bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
	 *  bits[2:0] = TRCIDR3.bits[30:28]
	 */
	drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
			 FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);

	/* number of resources trace unit supports */
	etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
	drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
	drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
	/*
	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e a
	 * value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
	 * As such add 1 to the value of NUMRSPAIR for a better representation.
	 *
	 * For ETM v4.3 and later, 0x0 means 0, and no pairs are available -
	 * the default TRUE and FALSE resource selectors are omitted.
	 * Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
	 */
	drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
	if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
		drvdata->nr_resource += 1;
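
	/*
	 * Worked example for the NUMRSPAIR adjustment above (illustrative):
	 * on an ETMv4.2 unit reporting NUMRSPAIR = 0x1, nr_resource becomes
	 * 2, i.e. 2 pairs = 4 resource selectors, of which selectors 0 and 1
	 * (pair 0, the fixed FALSE/TRUE resources) are reserved - hence the
	 * loops elsewhere in this file that program TRCRSCTLRn starting at
	 * index 2.
	 */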

	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator controls for tracing. Read any status regs as these
	 * also contain RO capability data.
	 */
	drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->config.ss_status[i] =
			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
	}
	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
	drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
	drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);

	etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
	drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);

	coresight_clear_self_claim_tag_unlocked(csa);
	etm4_cs_lock(drvdata, csa);
	cpu_detect_trace_filtering(drvdata);
}

static u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
	return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}

/* Set ELx trace filter access in the TRCVICTLR register */
static void etm4_set_victlr_access(struct etmv4_config *config)
{
	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
	config->vinst_ctrl |= etm4_get_victlr_access_type(config);
}

static void etm4_set_default_config(struct etmv4_config *config)
{
	/* disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* disable stalling */
	config->stall_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes, if available */
	config->syncfreq = 0xC;

	/* disable timestamp event */
	config->ts_ctrl = 0x0;

	/* TRCVICTLR::EVENT = 0x01, select the always on logic */
	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);

	/* TRCVICTLR::EXLEVEL_NS:EXLEVEL_S: Set kernel / user filtering */
	etm4_set_victlr_access(config);
}

static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
	u64 access_type = 0;

	/*
	 * EXLEVEL_NS, for NonSecure Exception levels.
	 * The mask here is a generic value and must be
	 * shifted to the corresponding field for the registers
	 */
	if (!is_kernel_in_hyp_mode()) {
		/* Stay away from hypervisor mode for non-VHE */
		access_type = ETM_EXLEVEL_NS_HYP;
		if (config->mode & ETM_MODE_EXCL_KERN)
			access_type |= ETM_EXLEVEL_NS_OS;
	} else if (config->mode & ETM_MODE_EXCL_KERN) {
		access_type = ETM_EXLEVEL_NS_HYP;
	}

	if (config->mode & ETM_MODE_EXCL_USER)
		access_type |= ETM_EXLEVEL_NS_APP;

	return access_type;
}

/*
 * Construct the exception level masks for a given config.
 * This must be shifted to the corresponding register field
 * for usage.
 */
static u64 etm4_get_access_type(struct etmv4_config *config)
{
	/* All Secure exception levels are excluded from the trace */
	return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
}

static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
{
	return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
}

static void etm4_set_comparator_filter(struct etmv4_config *config,
				       u64 start, u64 stop, int comparator)
{
	u64 access_type = etm4_get_comparator_access_type(config);

	/* First half of default address comparator */
	config->addr_val[comparator] = start;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;

	/* Second half of default address comparator */
	config->addr_val[comparator + 1] = stop;
	config->addr_acc[comparator + 1] = access_type;
	config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;

	/*
	 * Configure the ViewInst function to include this address range
	 * comparator.
	 *
	 * @comparator is divided by two since it is the index in the
	 * etmv4_config::addr_val array but register TRCVIIECTLR deals with
	 * address range comparator _pairs_.
	 *
	 * Therefore:
	 *	index 0 -> comparator pair 0
	 *	index 2 -> comparator pair 1
	 *	index 4 -> comparator pair 2
	 *	...
	 *	index 14 -> comparator pair 7
	 */
	config->viiectlr |= BIT(comparator / 2);
}

static void etm4_set_start_stop_filter(struct etmv4_config *config,
				       u64 address, int comparator,
				       enum etm_addr_type type)
{
	int shift;
	u64 access_type = etm4_get_comparator_access_type(config);

	/* Configure the comparator */
	config->addr_val[comparator] = address;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = type;

	/*
	 * Configure ViewInst Start-Stop control register.
	 * Addresses configured to start tracing go from bit 0 to n-1,
	 * while those configured to stop tracing from 16 to 16 + n-1.
	 */
	shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
	config->vissctlr |= BIT(shift + comparator);
}

static void etm4_set_default_filter(struct etmv4_config *config)
{
	/* Trace everything 'default' filter achieved by no filtering */
	config->viiectlr = 0x0;

	/*
	 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
	 * in the started state
	 */
	config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	config->mode |= ETM_MODE_VIEWINST_STARTSTOP;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
}

static void etm4_set_default(struct etmv4_config *config)
{
	if (WARN_ON_ONCE(!config))
		return;

	/*
	 * Make default initialisation trace everything
	 *
	 * This is done by a minimum default config sufficient to enable
	 * full instruction trace - with a default filter for trace all
	 * achieved by having no filtering.
	 */
	etm4_set_default_config(config);
	etm4_set_default_filter(config);
}

static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
{
	int nr_comparator, index = 0;
	struct etmv4_config *config = &drvdata->config;

	/*
	 * nr_addr_cmp holds the number of comparator _pairs_, so times 2
	 * for the total number of comparators.
	 */
	nr_comparator = drvdata->nr_addr_cmp * 2;

	/* Go through the tally of comparators looking for a free one. */
	while (index < nr_comparator) {
		switch (type) {
		case ETM_ADDR_TYPE_RANGE:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
			    config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
				return index;

			/* Address range comparators go in pairs */
			index += 2;
			break;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
				return index;

			/* Start/stop address can have odd indexes */
			index += 1;
			break;
		default:
			return -EINVAL;
		}
	}

	/* If we are here all the comparators have been used. */
	return -ENOSPC;
}
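
/*
 * For illustration (a sketch based on the generic perf address filter
 * syntax, not defined in this file): the filters consumed below typically
 * come from a perf command line such as
 *
 *	perf record -e cs_etm// --filter 'filter 0x1000/0x100@/bin/ls' -- ls
 *
 * which requests an address range comparator pair, while 'start'/'stop'
 * filters map to single comparators.
 */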

static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event)
{
	int i, comparator, ret = 0;
	u64 address;
	struct etmv4_config *config = &drvdata->config;
	struct etm_filters *filters = event->hw.addr_filters;

	if (!filters)
		goto default_filter;

	/* Sync events with what Perf got */
	perf_event_addr_filters_sync(event);

	/*
	 * If there are no filters to deal with simply go ahead with
	 * the default filter, i.e the entire address range.
	 */
	if (!filters->nr_filters)
		goto default_filter;

	for (i = 0; i < filters->nr_filters; i++) {
		struct etm_filter *filter = &filters->etm_filter[i];
		enum etm_addr_type type = filter->type;

		/* See if a comparator is free. */
		comparator = etm4_get_next_comparator(drvdata, type);
		if (comparator < 0) {
			ret = comparator;
			goto out;
		}

		switch (type) {
		case ETM_ADDR_TYPE_RANGE:
			etm4_set_comparator_filter(config,
						   filter->start_addr,
						   filter->stop_addr,
						   comparator);
			/*
			 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
			 * in the started state
			 */
			config->vinst_ctrl |= TRCVICTLR_SSSTATUS;

			/* No start-stop filtering for ViewInst */
			config->vissctlr = 0x0;
			break;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			/* Get the right start or stop address */
			address = (type == ETM_ADDR_TYPE_START ?
				   filter->start_addr :
				   filter->stop_addr);

			/* Configure comparator */
			etm4_set_start_stop_filter(config, address,
						   comparator, type);

			/*
			 * If filters::ssstatus == 1, trace acquisition was
			 * started but the process was yanked away before the
			 * stop address was hit. As such the start/stop
			 * logic needs to be re-started so that tracing can
			 * resume where it left.
			 *
			 * The start/stop logic status when a process is
			 * scheduled out is checked in function
			 * etm4_disable_perf().
			 */
			if (filters->ssstatus)
				config->vinst_ctrl |= TRCVICTLR_SSSTATUS;

			/* No include/exclude filtering for ViewInst */
			config->viiectlr = 0x0;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

	goto out;

default_filter:
	etm4_set_default_filter(config);

out:
	return ret;
}

void etm4_config_trace_mode(struct etmv4_config *config)
{
	u32 mode;

	mode = config->mode;
	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));

	/* nothing to do if neither flag is set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
		return;

	etm4_set_victlr_access(config);
}

static int etm4_online_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return etm4_probe_cpu(cpu);

	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
		coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
	return 0;
}

static int etm4_starting_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	raw_spin_lock(&etmdrvdata[cpu]->spinlock);
	if (!etmdrvdata[cpu]->os_unlock)
		etm4_os_unlock(etmdrvdata[cpu]);

	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm4_enable_hw(etmdrvdata[cpu]);
	raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

static int etm4_dying_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	raw_spin_lock(&etmdrvdata[cpu]->spinlock);
	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm4_disable_hw(etmdrvdata[cpu]);
	raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}
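
/*
 * Context note for the hotplug callbacks above (derived from the cpuhp
 * registration later in this driver, outside this excerpt): the "starting"
 * and "dying" callbacks run on the hotplugged CPU itself with interrupts
 * disabled, which is why they can program the per-CPU ETM directly instead
 * of going through smp_call_function_single() as the sysfs paths do.
 */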
static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
	int i, ret = 0;
	struct etmv4_save_state *state;
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa;
	struct device *etm_dev;

	if (WARN_ON(!csdev))
		return -ENODEV;

	etm_dev = &csdev->dev;
	csa = &csdev->access;

	/*
	 * As recommended by 3.4.1 ("The procedure when powering down the PE")
	 * of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

	etm4_cs_unlock(drvdata, csa);
	/* Lock the OS lock to disable trace and external debugger access */
	etm4_os_lock(drvdata);

	/* wait for TRCSTATR.PMSTABLE to go up */
	if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
		dev_err(etm_dev,
			"timeout while waiting for PM Stable Status\n");
		etm4_os_unlock(drvdata);
		ret = -EBUSY;
		goto out;
	}

	state = drvdata->save_state;

	state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
	if (drvdata->nr_pe)
		state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
	state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
	state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
	state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
	state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
	state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
	state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
	state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
	state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
	state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
	if (drvdata->q_filt)
		state->trcqctlr = etm4x_read32(csa, TRCQCTLR);

	state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
	state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
	state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);

	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));

	if (drvdata->nrseqstate) {
		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
	}

	if (drvdata->numextinsel)
		state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);

	for (i = 0; i < drvdata->nr_cntr; i++) {
		state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
		state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
		state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
		state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
	}

	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
		state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
	}

	/*
	 * Data trace stream is architecturally prohibited for A profile cores
	 * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
	 * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
	 * unit") of ARM IHI 0064D.
	 */

	for (i = 0; i < drvdata->numcidc; i++)
		state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));

	for (i = 0; i < drvdata->numvmidc; i++)
		state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));

	state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);

	state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);

	state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);

	if (!drvdata->skip_power_up)
		state->trcpdcr = etm4x_read32(csa, TRCPDCR);

	/* wait for TRCSTATR.IDLE to go up */
	if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) {
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");
		etm4_os_unlock(drvdata);
		ret = -EBUSY;
		goto out;
	}

	drvdata->state_needs_restore = true;

	/*
	 * Power can be removed from the trace unit now. We do this to
	 * potentially save power on systems that respect the TRCPDCR_PU
	 * despite requesting software to save/restore state.
	 */
	if (!drvdata->skip_power_up)
		etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
				      TRCPDCR);
out:
	etm4_cs_lock(drvdata, csa);
	return ret;
}
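/*
 * The TRFCR is saved even when the ETM itself is off so that self-hosted
 * trace filtering survives the power cycle; the full register snapshot is
 * taken only when the ETM is in use and a save area was allocated at probe.
 */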
static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
	int ret = 0;

	/* Save the TRFCR irrespective of whether the ETM is ON */
	if (drvdata->trfcr)
		drvdata->save_trfcr = read_trfcr();
	/*
	 * Save and restore the ETM Trace registers only if
	 * the ETM is active.
	 */
	if (coresight_get_mode(drvdata->csdev) && drvdata->save_state)
		ret = __etm4_cpu_save(drvdata);
	return ret;
}

static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
	int i;
	struct etmv4_save_state *state = drvdata->save_state;
	struct csdev_access *csa = &drvdata->csdev->access;

	if (WARN_ON(!drvdata->csdev))
		return;

	etm4_cs_unlock(drvdata, csa);
	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);

	etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
	if (drvdata->nr_pe)
		etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
	etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
	etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
	etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
	etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
	etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
	etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
	etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
	etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
	etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
	if (drvdata->q_filt)
		etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);

	etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
	etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
	etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);

	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));

	if (drvdata->nrseqstate) {
		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
	}
	if (drvdata->numextinsel)
		etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);

	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
		etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
		etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
		etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
	}

	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
		etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
	}

	for (i = 0; i < drvdata->numcidc; i++)
		etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));

	for (i = 0; i < drvdata->numvmidc; i++)
		etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));

	etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);

	etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);

	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);

	if (!drvdata->skip_power_up)
		etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);

	drvdata->state_needs_restore = false;

	/*
	 * As recommended by section 4.3.7 ("Synchronization when using the
	 * memory-mapped interface") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

	/* Unlock the OS lock to re-enable trace and external debug access */
	etm4_os_unlock(drvdata);
	etm4_cs_lock(drvdata, csa);
}
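/*
 * Mirror image of etm4_cpu_save(): the TRFCR is restored unconditionally,
 * the trace unit registers only if a matching save was actually performed.
 */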
static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
	if (drvdata->trfcr)
		write_trfcr(drvdata->save_trfcr);
	if (drvdata->state_needs_restore)
		__etm4_cpu_restore(drvdata);
}

static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			      void *v)
{
	struct etmv4_drvdata *drvdata;
	unsigned int cpu = smp_processor_id();

	if (!etmdrvdata[cpu])
		return NOTIFY_OK;

	drvdata = etmdrvdata[cpu];

	if (WARN_ON_ONCE(drvdata->cpu != cpu))
		return NOTIFY_BAD;

	switch (cmd) {
	case CPU_PM_ENTER:
		if (etm4_cpu_save(drvdata))
			return NOTIFY_BAD;
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		etm4_cpu_restore(drvdata);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_pm_nb = {
	.notifier_call = etm4_cpu_pm_notify,
};
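/*
 * Registration order matters below: the CPU PM notifier comes first, then
 * the STARTING hotplug state, then a dynamic ONLINE state whose id is kept
 * in hp_online.  etm4_pm_clear() unwinds in the same order.
 */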
/* Setup PM. Deals with error conditions and counts */
static int __init etm4_pm_setup(void)
{
	int ret;

	ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
					"arm/coresight4:starting",
					etm4_starting_cpu, etm4_dying_cpu);
	if (ret)
		goto unregister_notifier;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"arm/coresight4:online",
					etm4_online_cpu, NULL);

	/* HP dyn state ID returned in ret on success */
	if (ret > 0) {
		hp_online = ret;
		return 0;
	}

	/* failed dyn state - remove others */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);

unregister_notifier:
	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
	return ret;
}

static void etm4_pm_clear(void)
{
	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
	if (hp_online) {
		cpuhp_remove_state_nocalls(hp_online);
		hp_online = 0;
	}
}
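/*
 * Second half of probing: with the architecture data read on the target
 * CPU, register the device with the coresight core, perf and the config
 * infrastructure, and honour the boot_enable parameter.  Devices end up
 * named "etm<cpu>" or "ete<cpu>" (e.g. etm0, ete2).
 */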
static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
{
	int ret;
	struct coresight_platform_data *pdata = NULL;
	struct device *dev = init_arg->dev;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct coresight_desc desc = { 0 };
	u8 major, minor;
	char *type_name;

	if (!drvdata)
		return -EINVAL;

	desc.access = *init_arg->csa;

	if (!drvdata->arch)
		return -EINVAL;

	major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
	minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);

	if (etm4x_is_ete(drvdata)) {
		type_name = "ete";
		/* ETE v1 has major version == 0b101. Adjust this for logging. */
		major -= 4;
	} else {
		type_name = "etm";
	}

	desc.name = devm_kasprintf(dev, GFP_KERNEL,
				   "%s%d", type_name, drvdata->cpu);
	if (!desc.name)
		return -ENOMEM;

	etm4_set_default(&drvdata->config);

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev->platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc.ops = &etm4_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	/* register with config infrastructure & load any current features */
	ret = etm4_cscfg_register(drvdata->csdev);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	etmdrvdata[drvdata->cpu] = drvdata;

	dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
		 drvdata->cpu, type_name, major, minor);

	if (boot_enable) {
		coresight_enable_sysfs(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;
}
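/*
 * Probing must read the trace unit registers on the CPU that owns them.
 * If that CPU is offline, the collected arguments are parked in the
 * delayed_probe per-cpu slot and etm4_probe_cpu() finishes the job from
 * the CPU online callback.
 */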
static int etm4_probe(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct csdev_access access = { 0 };
	struct etm4_init_arg init_arg = { 0 };
	struct etm4_init_arg *delayed;
	int ret;

	if (WARN_ON(!drvdata))
		return -ENOMEM;

	ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
	if (ret)
		return ret;

	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;

	if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
		drvdata->save_state = devm_kmalloc(dev,
				sizeof(struct etmv4_save_state), GFP_KERNEL);
		if (!drvdata->save_state)
			return -ENOMEM;
	}

	raw_spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	init_arg.dev = dev;
	init_arg.csa = &access;

	/*
	 * Serialize against CPUHP callbacks to avoid a race condition
	 * between the smp call and saving the delayed probe.
	 */
	cpus_read_lock();
	if (smp_call_function_single(drvdata->cpu,
				     etm4_init_arch_data, &init_arg, 1)) {
		/* The CPU was offline, try again once it comes online. */
		delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);
		if (!delayed) {
			cpus_read_unlock();
			return -ENOMEM;
		}

		*delayed = init_arg;

		per_cpu(delayed_probe, drvdata->cpu) = delayed;

		cpus_read_unlock();
		return 0;
	}
	cpus_read_unlock();

	return etm4_add_coresight_dev(&init_arg);
}

static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
{
	struct etmv4_drvdata *drvdata;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct resource *res = &adev->res;
	int ret;

	/* Validity of the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->base = base;
	dev_set_drvdata(dev, drvdata);
	ret = etm4_probe(dev);
	if (!ret)
		pm_runtime_put(&adev->dev);

	return ret;
}

static int etm4_probe_platform_dev(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct etmv4_drvdata *drvdata;
	int ret;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (res) {
		drvdata->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(drvdata->base))
			return PTR_ERR(drvdata->base);
	}

	dev_set_drvdata(&pdev->dev, drvdata);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = etm4_probe(&pdev->dev);

	pm_runtime_put(&pdev->dev);
	if (ret)
		pm_runtime_disable(&pdev->dev);

	return ret;
}

static int etm4_probe_cpu(unsigned int cpu)
{
	int ret;
	struct etm4_init_arg init_arg;
	struct csdev_access access = { 0 };
	struct etm4_init_arg *iap = *this_cpu_ptr(&delayed_probe);

	if (!iap)
		return 0;

	init_arg = *iap;
	devm_kfree(init_arg.dev, iap);
	*this_cpu_ptr(&delayed_probe) = NULL;

	ret = pm_runtime_resume_and_get(init_arg.dev);
	if (ret < 0) {
		dev_err(init_arg.dev, "Failed to get PM runtime!\n");
		return 0;
	}

	init_arg.csa = &access;
	etm4_init_arch_data(&init_arg);

	etm4_add_coresight_dev(&init_arg);

	pm_runtime_put(init_arg.dev);
	return 0;
}

static struct amba_cs_uci_id uci_id_etm4[] = {
	{
		/* ETMv4 UCI data */
		.devarch	= ETM_DEVARCH_ETMv4x_ARCH,
		.devarch_mask	= ETM_DEVARCH_ID_MASK,
		.devtype	= CS_DEVTYPE_PE_TRACE,
	}
};

static void clear_etmdrvdata(void *info)
{
	int cpu = *(int *)info;

	etmdrvdata[cpu] = NULL;
	per_cpu(delayed_probe, cpu) = NULL;
}
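/*
 * Tear down a device: clear the per-CPU bookkeeping on the owning CPU and,
 * unless the probe was still pending on an offline CPU, unregister from
 * perf, the config infrastructure and the coresight core.
 */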
static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
	bool had_delayed_probe;

	/*
	 * Take the hotplug lock here to avoid racing between
	 * etm4_remove_dev() and the CPU hotplug callbacks.
	 */
	cpus_read_lock();

	had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);

	/*
	 * The readers of etmdrvdata[] are the CPU hotplug and PM notification
	 * callbacks.  Changing etmdrvdata[i] on CPU i ensures those callbacks
	 * see a consistent view for the duration of a single invocation.
	 */
	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
		clear_etmdrvdata(&drvdata->cpu);

	cpus_read_unlock();

	if (!had_delayed_probe) {
		etm_perf_symlink(drvdata->csdev, false);
		cscfg_unregister_csdev(drvdata->csdev);
		coresight_unregister(drvdata->csdev);
	}
}

static void etm4_remove_amba(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	if (drvdata)
		etm4_remove_dev(drvdata);
}

static void etm4_remove_platform_dev(struct platform_device *pdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

	if (drvdata)
		etm4_remove_dev(drvdata);
	pm_runtime_disable(&pdev->dev);
}
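/*
 * Early parts in this table are matched on their AMBA PID alone; later
 * entries also carry UCI data so the match is qualified by DEVARCH/DEVTYPE
 * beyond the PID.  The trailing match-all entry covers any component whose
 * DEVARCH identifies an ETMv4.
 */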
static const struct amba_id etm4_ids[] = {
	CS_AMBA_ID(0x000bb95d),			/* Cortex-A53 */
	CS_AMBA_ID(0x000bb95e),			/* Cortex-A57 */
	CS_AMBA_ID(0x000bb95a),			/* Cortex-A72 */
	CS_AMBA_ID(0x000bb959),			/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
	CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
	CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
	CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
	CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
	CS_AMBA_UCI_ID(0x000bbd0d, uci_id_etm4),/* Qualcomm Kryo 5XX Cortex-A77 */
	CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
	CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
	CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
	/*
	 * Match all PIDs with ETM4 DEVARCH. No need for adding any of the new
	 * CPUs to the list here.
	 */
	CS_AMBA_MATCH_ALL_UCI(uci_id_etm4),
	{},
};

MODULE_DEVICE_TABLE(amba, etm4_ids);

static struct amba_driver etm4x_amba_driver = {
	.drv = {
		.name	= "coresight-etm4x",
		.suppress_bind_attrs = true,
	},
	.probe		= etm4_probe_amba,
	.remove		= etm4_remove_amba,
	.id_table	= etm4_ids,
};

#ifdef CONFIG_PM
static int etm4_runtime_suspend(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);

	clk_disable_unprepare(drvdata->atclk);
	clk_disable_unprepare(drvdata->pclk);

	return 0;
}

static int etm4_runtime_resume(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(drvdata->atclk);
	if (ret)
		clk_disable_unprepare(drvdata->pclk);

	return ret;
}
#endif

static const struct dev_pm_ops etm4_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm4_runtime_suspend, etm4_runtime_resume, NULL)
};

static const struct of_device_id etm4_sysreg_match[] = {
	{ .compatible	= "arm,coresight-etm4x-sysreg" },
	{ .compatible	= "arm,embedded-trace-extension" },
	{}
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id etm4x_acpi_ids[] = {
	{"ARMHC500", 0, 0, 0}, /* ARM CoreSight ETM4x */
	{}
};
MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
#endif

static struct platform_driver etm4_platform_driver = {
	.probe		= etm4_probe_platform_dev,
	.remove		= etm4_remove_platform_dev,
	.driver		= {
		.name			= "coresight-etm4x",
		.of_match_table		= etm4_sysreg_match,
		.acpi_match_table	= ACPI_PTR(etm4x_acpi_ids),
		.suppress_bind_attrs	= true,
		.pm			= &etm4_dev_pm_ops,
	},
};

static int __init etm4x_init(void)
{
	int ret;

	ret = etm4_pm_setup();

	/* etm4_pm_setup() does its own cleanup - exit on error */
	if (ret)
		return ret;

	ret = amba_driver_register(&etm4x_amba_driver);
	if (ret) {
		pr_err("Error registering etm4x AMBA driver\n");
		goto clear_pm;
	}

	ret = platform_driver_register(&etm4_platform_driver);
	if (!ret)
		return 0;

	pr_err("Error registering etm4x platform driver\n");
	amba_driver_unregister(&etm4x_amba_driver);

clear_pm:
	etm4_pm_clear();
	return ret;
}

static void __exit etm4x_exit(void)
{
	amba_driver_unregister(&etm4x_amba_driver);
	platform_driver_unregister(&etm4_platform_driver);
	etm4_pm_clear();
}

module_init(etm4x_init);
module_exit(etm4x_exit);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace v4.x driver");
MODULE_LICENSE("GPL v2");