// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/clk/clk-conf.h>

#include <asm/barrier.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/local.h>
#include <asm/virt.h>

#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
#include "coresight-etm4x-cfg.h"
#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"

static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
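
/*
 * Illustrative note (not from the original source): with the driver built
 * in, boot tracing can be requested on the kernel command line with
 * "coresight_etm4x.boot_enable=1"; when built as a module, the equivalent
 * is "modprobe coresight-etm4x boot_enable=1". The parameter is read-only
 * at runtime (mode 0444).
 */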

#define PARAM_PM_SAVE_FIRMWARE	  0 /* save self-hosted state as per firmware */
#define PARAM_PM_SAVE_NEVER	  1 /* never save any state */
#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */

static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
module_param(pm_save_enable, int, 0444);
MODULE_PARM_DESC(pm_save_enable,
	"Save/restore state on power down: 0 = as per firmware (default), 1 = never, 2 = self-hosted");

static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default_config(struct etmv4_config *config);
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event);
static u64 etm4_get_access_type(struct etmv4_config *config);

static enum cpuhp_state hp_online;

struct etm4_init_arg {
	struct device *dev;
	struct csdev_access *csa;
};

static DEFINE_PER_CPU(struct etm4_init_arg *, delayed_probe);
static int etm4_probe_cpu(unsigned int cpu);

/*
 * Check if TRCSSPCICRn(i) is implemented for a given instance.
 *
 * TRCSSPCICR<n> is present only if all of the following are true:
 *	TRCIDR4.NUMSSCC > n.
 *	TRCIDR4.NUMPC > 0b0000.
 *	TRCSSCSR<n>.PC == 0b1.
 */
static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
{
	return (n < drvdata->nr_ss_cmp) &&
	       drvdata->nr_pe &&
	       (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
}

u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
	u64 res = 0;

	switch (offset) {
	ETM4x_READ_SYSREG_CASES(res)
	default:
		pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
				    offset);
	}

	if (!_relaxed)
		__io_ar(res);	/* Imitate the !relaxed I/O helpers */

	return res;
}

void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
	if (!_relaxed)
		__io_bw();	/* Imitate the !relaxed I/O helpers */
	if (!_64bit)
		val &= GENMASK(31, 0);

	switch (offset) {
	ETM4x_WRITE_SYSREG_CASES(val)
	default:
		pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
				    offset);
	}
}

static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
	u64 res = 0;

	switch (offset) {
	ETE_READ_CASES(res)
	default:
		pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
				    offset);
	}

	if (!_relaxed)
		__io_ar(res);	/* Imitate the !relaxed I/O helpers */

	return res;
}

static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
	if (!_relaxed)
		__io_bw();	/* Imitate the !relaxed I/O helpers */
	if (!_64bit)
		val &= GENMASK(31, 0);

	switch (offset) {
	ETE_WRITE_CASES(val)
	default:
		pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
				    offset);
	}
}
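
/*
 * Illustrative sketch (an assumption, not the actual macro expansion): the
 * ETM4x_READ_SYSREG_CASES()/ETE_READ_CASES() macros are expected to expand
 * into case arms roughly of the form
 *
 *	case TRCTRACEIDR:
 *		res = read_sysreg_s(SYS_TRCTRACEIDR);
 *		break;
 *
 * i.e. each memory-map register offset is dispatched to the matching
 * system-register accessor. This is what lets the same csdev_access
 * read/write callbacks back either an MMIO-based or a sysreg-only
 * trace unit.
 */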

static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
			       struct csdev_access *csa)
{
	u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);

	drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
}

static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
			      struct csdev_access *csa, u32 val)
{
	val = !!val;

	switch (drvdata->os_lock_model) {
	case ETM_OSLOCK_PRESENT:
		etm4x_relaxed_write32(csa, val, TRCOSLAR);
		break;
	case ETM_OSLOCK_PE:
		write_sysreg_s(val, SYS_OSLAR_EL1);
		break;
	default:
		pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
			     smp_processor_id(), drvdata->os_lock_model);
		fallthrough;
	case ETM_OSLOCK_NI:
		return;
	}
	isb();
}

static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
				      struct csdev_access *csa)
{
	WARN_ON(drvdata->cpu != smp_processor_id());

	/* Writing 0 to the OS Lock unlocks the trace unit registers */
	etm_write_os_lock(drvdata, csa, 0x0);
	drvdata->os_unlock = true;
}

static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
	if (!WARN_ON(!drvdata->csdev))
		etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
}

static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
	if (WARN_ON(!drvdata->csdev))
		return;
	/* Writing 0x1 to the OS Lock locks the trace registers */
	etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
	drvdata->os_unlock = false;
}

static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
			 struct csdev_access *csa)
{
	/* The Software Lock is only accessible via the memory-mapped interface */
	if (csa->io_mem)
		CS_LOCK(csa->base);
}

static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
			   struct csdev_access *csa)
{
	if (csa->io_mem)
		CS_UNLOCK(csa->base);
}

static int etm4_cpu_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return drvdata->cpu;
}

int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata)
{
	int trace_id;

	/*
	 * This will allocate a trace ID to the cpu,
	 * or return the one currently allocated.
	 * The trace ID function has its own lock.
	 */
	trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
	if (IS_VALID_CS_TRACE_ID(trace_id))
		drvdata->trcid = (u8)trace_id;
	else
		dev_err(&drvdata->csdev->dev,
			"Failed to allocate trace ID for %s on CPU%d\n",
			dev_name(&drvdata->csdev->dev), drvdata->cpu);
	return trace_id;
}

void etm4_release_trace_id(struct etmv4_drvdata *drvdata)
{
	coresight_trace_id_put_cpu_id(drvdata->cpu);
}

struct etm4_enable_arg {
	struct etmv4_drvdata *drvdata;
	int rc;
};

/*
 * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs.
 * When the CPU supports FEAT_TRF, we could move the ETM to a trace
 * prohibited state by filtering the Exception levels via TRFCR_EL1.
 */
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
	/* If the CPU doesn't support FEAT_TRF, nothing to do */
	if (!drvdata->trfcr)
		return;
	cpu_prohibit_trace();
}

/*
 * etm4x_allow_trace - Allow CPU tracing in the respective ELs,
 * as configured by the drvdata->config.mode for the current
 * session. Even though we have TRCVICTLR bits to filter the
 * trace in the ELs, it doesn't prevent the ETM from generating
 * a packet (e.g., TraceInfo) that might contain the addresses from
 * the excluded levels. Thus we use the additional controls provided
 * via the Trace Filtering controls (FEAT_TRF) to make sure no trace
 * is generated for the excluded ELs.
 */
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
	u64 trfcr = drvdata->trfcr;

	/* If the CPU doesn't support FEAT_TRF, nothing to do */
	if (!trfcr)
		return;

	if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
		trfcr &= ~TRFCR_ELx_ExTRE;
	if (drvdata->config.mode & ETM_MODE_EXCL_USER)
		trfcr &= ~TRFCR_ELx_E0TRE;

	write_trfcr(trfcr);
}
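
/*
 * Worked example (for illustration only): a perf session opened with
 * attr.exclude_kernel set ends up with ETM_MODE_EXCL_KERN in
 * drvdata->config.mode (see etm4_parse_event_config()), so the value
 * written here is drvdata->trfcr with TRFCR_ELx_ExTRE cleared: EL1
 * trace generation is disabled at the source rather than merely
 * filtered by TRCVICTLR.
 */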

#ifdef CONFIG_ETM4X_IMPDEF_FEATURE

#define HISI_HIP08_AMBA_ID		0x000b6d01
#define ETM4_AMBA_MASK			0xfffff
#define HISI_HIP08_CORE_COMMIT_MASK	0x3000
#define HISI_HIP08_CORE_COMMIT_SHIFT	12
#define HISI_HIP08_CORE_COMMIT_FULL	0b00
#define HISI_HIP08_CORE_COMMIT_LVL_1	0b01
#define HISI_HIP08_CORE_COMMIT_REG	sys_reg(3, 1, 15, 2, 5)

struct etm4_arch_features {
	void (*arch_callback)(bool enable);
};

static bool etm4_hisi_match_pid(unsigned int id)
{
	return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
}

static void etm4_hisi_config_core_commit(bool enable)
{
	u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
		    HISI_HIP08_CORE_COMMIT_FULL;
	u64 val;

	/*
	 * Bits 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
	 * to set core-commit: 2'b00 means the cpu runs at full speed, while
	 * 2'b01, 2'b10 and 2'b11 reduce the pipeline speed, with 2'b01 being
	 * level-1 speed (the minimum). So bits 12 and 13 are cleared together.
	 */
	val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
	val &= ~HISI_HIP08_CORE_COMMIT_MASK;
	val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
	write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
}

static struct etm4_arch_features etm4_features[] = {
	[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
		.arch_callback = etm4_hisi_config_core_commit,
	},
	{},
};

static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
{
	struct etm4_arch_features *ftr;
	int bit;

	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
		ftr = &etm4_features[bit];

		if (ftr->arch_callback)
			ftr->arch_callback(true);
	}
}

static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
{
	struct etm4_arch_features *ftr;
	int bit;

	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
		ftr = &etm4_features[bit];

		if (ftr->arch_callback)
			ftr->arch_callback(false);
	}
}

static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
				     struct csdev_access *csa)
{
	/*
	 * TRCPIDR* registers are not required for ETMs with system
	 * instructions. They must be identified by the MIDR+REVIDRs.
	 * Skip the TRCPID checks for now.
	 */
	if (!csa->io_mem)
		return;

	if (etm4_hisi_match_pid(coresight_get_pid(csa)))
		set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
}
#else
static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
				     struct csdev_access *csa)
{
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */

static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
	int i, rc;
	struct etmv4_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;
	struct device *etm_dev = &csdev->dev;
	struct csdev_access *csa = &csdev->access;

	etm4_cs_unlock(drvdata, csa);
	etm4_enable_arch_specific(drvdata);

	etm4_os_unlock(drvdata);

	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		goto done;

	/* Disable the trace unit before programming trace registers */
	etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);

	/*
	 * If we use system instructions, we need to synchronize the
	 * write to the TRCPRGCTLR, before accessing the TRCSTATR.
	 * See ARM IHI 0064F, section
	 * "4.3.7 Synchronization of register updates"
	 */
	if (!csa->io_mem)
		isb();

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");
	if (drvdata->nr_pe)
		etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
	etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
	/* nothing specific implemented */
	etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
	etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
	etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
	etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
	etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
	etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
	etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
	etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
	etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
	etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
	etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
	if (drvdata->nrseqstate) {
		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
	}
	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
		etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
		etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
	}

	/*
	 * Resource selector pair 0 is always implemented and reserved. As
	 * such start at 2.
	 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		/* always clear status bit on restart if using single-shot */
		if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
			config->ss_status[i] &= ~TRCSSCSRn_STATUS;
		etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
		etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
	}
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
	etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
	etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);

	if (!drvdata->skip_power_up) {
		u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);

		/*
		 * Request to keep the trace unit powered and also
		 * emulation of powerdown
		 */
		etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
	}

	/*
	 * ETE mandates that the TRCRSR is written to before
	 * enabling it.
	 */
	if (etm4x_is_ete(drvdata))
		etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);

	etm4x_allow_trace(drvdata);
	/* Enable the trace unit */
	etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);

	/* Synchronize the register updates for sysreg access */
	if (!csa->io_mem)
		isb();

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");

	/*
	 * As recommended by section 4.3.7 ("Synchronization when using the
	 * memory-mapped interface") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

done:
	etm4_cs_lock(drvdata, csa);

	dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}

static void etm4_enable_hw_smp_call(void *info)
{
	struct etm4_enable_arg *arg = info;

	if (WARN_ON(!arg))
		return;
	arg->rc = etm4_enable_hw(arg->drvdata);
}

/*
 * The goal of function etm4_config_timestamp_event() is to configure a
 * counter that will tell the tracer to emit a timestamp packet when it
 * reaches zero. This is done in order to get a more fine-grained idea
 * of when instructions are executed so that they can be correlated
 * with execution on other CPUs.
 *
 * To do this the counter itself is configured to self reload and
 * TRCRSCTLR1 (always true) used to get the counter to decrement. From
 * there a resource selector is configured with the counter and the
 * timestamp control register to use the resource selector to trigger the
 * event that will insert a timestamp packet in the stream.
 */
static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
{
	int ctridx, ret = -EINVAL;
	int counter, rselector;
	u32 val = 0;
	struct etmv4_config *config = &drvdata->config;

	/* No point in trying if we don't have at least one counter */
	if (!drvdata->nr_cntr)
		goto out;

	/* Find a counter that hasn't been initialised */
	for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
		if (config->cntr_val[ctridx] == 0)
			break;

	/* All the counters have been configured already, bail out */
	if (ctridx == drvdata->nr_cntr) {
		pr_debug("%s: no available counter found\n", __func__);
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * Search for an available resource selector to use, starting at
	 * '2' since every implementation has at least 2 resource selectors.
	 * ETMIDR4 gives the number of resource selector _pairs_,
	 * hence multiply by 2.
	 */
	for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
		if (!config->res_ctrl[rselector])
			break;

	if (rselector == drvdata->nr_resource * 2) {
		pr_debug("%s: no available resource selector found\n",
			 __func__);
		ret = -ENOSPC;
		goto out;
	}

	/* Remember what counter we used */
	counter = 1 << ctridx;

	/*
	 * Initialise original and reload counter value to the smallest
	 * possible value in order to get as much precision as we can.
	 */
	config->cntr_val[ctridx] = 1;
	config->cntrldvr[ctridx] = 1;

	/* Set the trace counter control register */
	val = 0x1 << 16 |	/* Bit 16, reload counter automatically */
	      0x0 << 7 |	/* Select single resource selector */
	      0x1;		/* Resource selector 1, i.e. always true */

	config->cntr_ctrl[ctridx] = val;

	val = 0x2 << 16 |	/* Group 0b0010 - Counter and sequencers */
	      counter << 0;	/* Counter to use */

	config->res_ctrl[rselector] = val;

	val = 0x0 << 7 |	/* Select single resource selector */
	      rselector;	/* Resource selector */

	config->ts_ctrl = val;

	ret = 0;
out:
	return ret;
}
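
/*
 * Worked example (for illustration only): on a unit where counter 0 and
 * resource selector 2 are free, the code above leaves
 *	cntr_val[0] = cntrldvr[0] = 1	  (reload with the smallest period)
 *	cntr_ctrl[0] = BIT(16) | 0x1	  (self-reload, driven by resource 1,
 *					   the always-true selector)
 *	res_ctrl[2]  = 0x2 << 16 | BIT(0) (counter/sequencer group, counter 0)
 *	ts_ctrl	     = 2		  (timestamp event fired by selector 2)
 * so a timestamp packet is requested every time the counter wraps.
 */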

static int etm4_parse_event_config(struct coresight_device *csdev,
				   struct perf_event *event)
{
	int ret = 0;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etmv4_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;
	unsigned long cfg_hash;
	int preset, cc_threshold;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etmv4_config));

	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm4_set_default_config(config);

	/* Configure filters specified on the perf cmd line, if any. */
	ret = etm4_set_event_filters(drvdata, event);
	if (ret)
		goto out;

	/* Go from generic option to ETMv4 specifics */
	if (attr->config & BIT(ETM_OPT_CYCACC)) {
		config->cfg |= TRCCONFIGR_CCI;
		/* TRM: Must program this for cycacc to work */
		cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK;
		if (!cc_threshold)
			cc_threshold = ETM_CYC_THRESHOLD_DEFAULT;
		if (cc_threshold < drvdata->ccitmin)
			cc_threshold = drvdata->ccitmin;
		config->ccctlr = cc_threshold;
	}
	if (attr->config & BIT(ETM_OPT_TS)) {
		/*
		 * Configure timestamps to be emitted at regular intervals in
		 * order to correlate instructions executed on different CPUs
		 * (CPU-wide trace scenarios).
		 */
		ret = etm4_config_timestamp_event(drvdata);

		/*
		 * No need to go further if timestamp intervals can't
		 * be configured.
		 */
		if (ret)
			goto out;

		/* bit[11], Global timestamp tracing bit */
		config->cfg |= TRCCONFIGR_TS;
	}

	/* Only trace contextID when running in the root PID namespace */
	if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
	    task_is_in_init_pid_ns(current))
		/* bit[6], Context ID tracing bit */
		config->cfg |= TRCCONFIGR_CID;

	/*
	 * If bit ETM_OPT_CTXTID2 is set in the perf config, this asks to
	 * trace VMID for recording CONTEXTIDR_EL2. Do not enable VMID
	 * tracing if the kernel is not running in EL2.
	 */
	if (attr->config & BIT(ETM_OPT_CTXTID2)) {
		if (!is_kernel_in_hyp_mode()) {
			ret = -EINVAL;
			goto out;
		}
		/* Only trace virtual contextID when running in the root PID namespace */
		if (task_is_in_init_pid_ns(current))
			config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
	}

	/* return stack - enable if selected and supported */
	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
		/* bit[12], Return stack enable bit */
		config->cfg |= TRCCONFIGR_RS;

	/*
	 * Set any selected configuration and preset.
	 *
	 * This extracts the values of PMU_FORMAT_ATTR(configid) and PMU_FORMAT_ATTR(preset)
	 * in the perf attributes defined in coresight-etm-perf.c.
	 * configid uses bits 63:32 of attr->config2, preset uses bits 3:0 of attr->config.
	 * A zero configid means no configuration active, preset = 0 means no preset selected.
	 */
	if (attr->config2 & GENMASK_ULL(63, 32)) {
		cfg_hash = (u32)(attr->config2 >> 32);
		preset = attr->config & 0xF;
		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
	}

	/* branch broadcast - enable if selected and supported */
	if (attr->config & BIT(ETM_OPT_BRANCH_BROADCAST)) {
		if (!drvdata->trcbb) {
			/*
			 * Missing BB support could cause silent decode errors
			 * so fail to open if it's not supported.
			 */
			ret = -EINVAL;
			goto out;
		} else {
			config->cfg |= BIT(ETM4_CFG_BIT_BB);
		}
	}

out:
	return ret;
}
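
/*
 * Usage sketch (an assumption - option names as exposed by the cs_etm PMU
 * in coresight-etm-perf.c; the exact set depends on the kernel version):
 * the generic options above map onto perf event attributes, e.g.
 *
 *	perf record -e cs_etm/cycacc,timestamp/u -- <workload>
 *
 * which sets ETM_OPT_CYCACC and ETM_OPT_TS in attr->config and sets
 * attr->exclude_kernel, driving the branches taken in this parser.
 */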

static int etm4_enable_perf(struct coresight_device *csdev,
			    struct perf_event *event,
			    struct coresight_trace_id_map *id_map)
{
	int ret = 0, trace_id;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
		ret = -EINVAL;
		goto out;
	}

	/* Configure the tracer based on the session's specifics */
	ret = etm4_parse_event_config(csdev, event);
	if (ret)
		goto out;

	/*
	 * perf allocates cpu ids as part of _setup_aux() - device needs to use
	 * the allocated ID. This reads the current version without allocation.
	 *
	 * This does not use the trace id lock to prevent lockdep issues
	 * with perf locks - we know the ID cannot change until perf shuts down
	 * the session.
	 */
	trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
	if (!IS_VALID_CS_TRACE_ID(trace_id)) {
		dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
			dev_name(&drvdata->csdev->dev), drvdata->cpu);
		ret = -EINVAL;
		goto out;
	}
	drvdata->trcid = (u8)trace_id;

	/* And enable it */
	ret = etm4_enable_hw(drvdata);

out:
	return ret;
}

static int etm4_enable_sysfs(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm4_enable_arg arg = { };
	unsigned long cfg_hash;
	int ret, preset;

	/* enable any config activated by configfs */
	cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
	if (cfg_hash) {
		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
		if (ret)
			return ret;
	}

	spin_lock(&drvdata->spinlock);

	/* sysfs needs to read and allocate a trace ID */
	ret = etm4_read_alloc_trace_id(drvdata);
	if (ret < 0)
		goto unlock_sysfs_enable;

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	arg.drvdata = drvdata;
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw_smp_call, &arg, 1);
	if (!ret)
		ret = arg.rc;
	if (!ret)
		drvdata->sticky_enable = true;

	if (ret)
		etm4_release_trace_id(drvdata);

unlock_sysfs_enable:
	spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
	return ret;
}

static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
		       enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
	int ret;

	if (!coresight_take_mode(csdev, mode)) {
		/* Someone is already using the tracer */
		return -EBUSY;
	}

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm4_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etm4_enable_perf(csdev, event, id_map);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		coresight_set_mode(csdev, CS_MODE_DISABLED);

	return ret;
}

static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;
	struct etmv4_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;
	struct device *etm_dev = &csdev->dev;
	struct csdev_access *csa = &csdev->access;
	int i;

	etm4_cs_unlock(drvdata, csa);
	etm4_disable_arch_specific(drvdata);

	if (!drvdata->skip_power_up) {
		/* power can be removed from the trace unit now */
		control = etm4x_relaxed_read32(csa, TRCPDCR);
		control &= ~TRCPDCR_PU;
		etm4x_relaxed_write32(csa, control, TRCPDCR);
	}

	control = etm4x_relaxed_read32(csa, TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/*
	 * If the CPU supports v8.4 Trace filter Control,
	 * set the ETM to trace prohibited region.
	 */
	etm4x_prohibit_trace(drvdata);
	/*
	 * Make sure everything completes before disabling, as recommended
	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
	 * SSTATUS") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();
	/* Trace synchronization barrier, is a nop if not supported */
	tsb_csync();
	etm4x_relaxed_write32(csa, control, TRCPRGCTLR);

	/* wait for TRCSTATR.PMSTABLE to go to '1' */
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for PM stable Trace Status\n");
	/* read the status of the single-shot comparators */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_status[i] =
			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
	}

	/* read back the current counter values */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntr_val[i] =
			etm4x_relaxed_read32(csa, TRCCNTVRn(i));
	}

	coresight_disclaim_device_unlocked(csdev);
	etm4_cs_lock(drvdata, csa);

	dev_dbg(&drvdata->csdev->dev,
		"cpu: %d disable smp call done\n", drvdata->cpu);
}

static int etm4_disable_perf(struct coresight_device *csdev,
			     struct perf_event *event)
{
	u32 control;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_event_attr *attr = &event->attr;

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return -EINVAL;

	etm4_disable_hw(drvdata);
	/*
	 * The config_id occupies bits 63:32 of the config2 perf event attr
	 * field. If this is non-zero then we will have enabled a config.
	 */
	if (attr->config2 & GENMASK_ULL(63, 32))
		cscfg_csdev_disable_active_config(csdev);

	/*
	 * Check if the start/stop logic was active when the unit was stopped.
	 * That way we can re-enable the start/stop logic when the process is
	 * scheduled again. Configuration of the start/stop logic happens in
	 * function etm4_set_event_filters().
	 */
	control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
	/* TRCVICTLR::SSSTATUS, bit[9] */
	filters->ssstatus = (control & BIT(9));

	/*
	 * perf will release trace ids when _free_aux() is
	 * called at the end of the session.
	 */

	return 0;
}

static void etm4_disable_sysfs(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	cpus_read_lock();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);

	spin_unlock(&drvdata->spinlock);
	cpus_read_unlock();

	/*
	 * we only release trace IDs when resetting sysfs.
	 * This permits sysfs users to read the trace ID after the trace
	 * session has completed. This maintains operational behaviour with
	 * the prior trace id allocation method.
	 */

	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}

static void etm4_disable(struct coresight_device *csdev,
			 struct perf_event *event)
{
	enum cs_mode mode;

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status. As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = coresight_get_mode(csdev);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm4_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm4_disable_perf(csdev, event);
		break;
	}

	if (mode)
		coresight_set_mode(csdev, CS_MODE_DISABLED);
}

static const struct coresight_ops_source etm4_source_ops = {
	.cpu_id		= etm4_cpu_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops	= &etm4_source_ops,
};

static inline bool cpu_supports_sysreg_trace(void)
{
	u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);

	return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}

static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
				    struct csdev_access *csa)
{
	u32 devarch;

	if (!cpu_supports_sysreg_trace())
		return false;

	/*
	 * ETMs implementing sysreg access must implement TRCDEVARCH.
	 */
	devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
	switch (devarch & ETM_DEVARCH_ID_MASK) {
	case ETM_DEVARCH_ETMv4x_ARCH:
		*csa = (struct csdev_access) {
			.io_mem	= false,
			.read	= etm4x_sysreg_read,
			.write	= etm4x_sysreg_write,
		};
		break;
	case ETM_DEVARCH_ETE_ARCH:
		*csa = (struct csdev_access) {
			.io_mem	= false,
			.read	= ete_sysreg_read,
			.write	= ete_sysreg_write,
		};
		break;
	default:
		return false;
	}

	drvdata->arch = etm_devarch_to_arch(devarch);
	return true;
}

static bool is_devtype_cpu_trace(void __iomem *base)
{
	u32 devtype = readl(base + TRCDEVTYPE);

	return (devtype == CS_DEVTYPE_PE_TRACE);
}

static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
				   struct csdev_access *csa)
{
	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);

	if (!is_coresight_device(drvdata->base) || !is_devtype_cpu_trace(drvdata->base))
		return false;

	/*
	 * All ETMs must implement TRCDEVARCH to indicate that
	 * the component is an ETMv4. Even though TRCIDR1 also
	 * contains the information, it is part of the "Trace"
	 * registers and must be accessed with the OSLK cleared,
	 * with MMIO. But we cannot touch the OSLK until we are
	 * sure this is an ETM. So rely only on the TRCDEVARCH.
	 */
	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
		return false;
	}

	drvdata->arch = etm_devarch_to_arch(devarch);
	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
	return true;
}

static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
				   struct csdev_access *csa)
{
	/*
	 * Always choose the memory-mapped I/O if there is a memory
	 * map, to prevent sysreg access on broken systems.
	 */
	if (drvdata->base)
		return etm4_init_iomem_access(drvdata, csa);

	if (etm4_init_sysreg_access(drvdata, csa))
		return true;

	return false;
}

static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	u64 trfcr;

	drvdata->trfcr = 0;
	if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
		return;

	/*
	 * If the CPU supports v8.4 SelfHosted Tracing, enable
	 * tracing at the kernel EL and EL0, forcing to use the
	 * virtual time as the timestamp.
	 */
	trfcr = (TRFCR_ELx_TS_VIRTUAL |
		 TRFCR_ELx_ExTRE |
		 TRFCR_ELx_E0TRE);

	/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
	if (is_kernel_in_hyp_mode())
		trfcr |= TRFCR_EL2_CX;

	drvdata->trfcr = trfcr;
}

/*
 * The following errata on the applicable cpu ranges affect the CCITMIN
 * field in the TRCIDR3 register. A software read of the field returns
 * 0x100, limiting the cycle threshold granularity, whereas the right
 * value should have been 0x4, which is well supported in the hardware.
 */
static struct midr_range etm_wrong_ccitmin_cpus[] = {
	/* Erratum #1490853 - Cortex-A76 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0),
	/* Erratum #1490853 - Neoverse-N1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0),
	/* Erratum #1491015 - Cortex-A77 */
	MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
	/* Erratum #1502854 - Cortex-X1 */
	MIDR_REV(MIDR_CORTEX_X1, 0, 0),
	/* Erratum #1619801 - Neoverse-V1 */
	MIDR_REV(MIDR_NEOVERSE_V1, 0, 0),
	{},
};

static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
{
	/*
	 * Erratum affected cpus will read 256 as the minimum
	 * instruction trace cycle counting threshold whereas
	 * the correct value should be 4 instead. Override the
	 * recorded value for 'drvdata->ccitmin' to workaround
	 * this problem.
	 */
	if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
		if (drvdata->ccitmin == 256)
			drvdata->ccitmin = 4;
	}
}

static void etm4_init_arch_data(void *info)
{
	u32 etmidr0;
	u32 etmidr2;
	u32 etmidr3;
	u32 etmidr4;
	u32 etmidr5;
	struct etm4_init_arg *init_arg = info;
	struct etmv4_drvdata *drvdata;
	struct csdev_access *csa;
	struct device *dev = init_arg->dev;
	int i;

	drvdata = dev_get_drvdata(init_arg->dev);
	csa = init_arg->csa;

	/*
	 * If we are unable to detect the access mechanism,
	 * or unable to detect the trace unit type, fail
	 * early.
	 */
	if (!etm4_init_csdev_access(drvdata, csa))
		return;

	if (!csa->io_mem ||
	    fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
		drvdata->skip_power_up = true;

	/* Detect the support for OS Lock before we actually use it */
	etm_detect_os_lock(drvdata, csa);

	/* Make sure all registers are accessible */
	etm4_os_unlock_csa(drvdata, csa);
	etm4_cs_unlock(drvdata, csa);

	etm4_check_arch_features(drvdata, csa);

	/* find all capabilities of the tracing unit */
	etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
	/* TRCCCI, bit[7] Cycle counting instruction bit */
	drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
	/* RETSTACK, bit[9] Return stack bit */
	drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
	if (drvdata->q_support)
		drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);

	/* maximum size of resources */
	etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);

	etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
	drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
	etm4_fixup_wrong_ccitmin(drvdata);

	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
	drvdata->config.s_ex_level = drvdata->s_ex_level;
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
	drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
	/* STALLCTL, bit[26] is stall control implemented? */
	drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
	/* SYSSTALL, bit[27] implementation can support stall control? */
	drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
	/*
	 * NUMPROC - the number of PEs available for tracing, a 5-bit
	 * value split across TRCIDR3:
	 *	bits[4:3] = TRCIDR3.bits[13:12] (since ETM v4.2, otherwise RES0)
	 *	bits[2:0] = TRCIDR3.bits[30:28]
	 */
	drvdata->nr_pe = (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
			 FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);

	/* number of resources trace unit supports */
	etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
	drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
	drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
	/*
	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e. a
	 * value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
	 * As such add 1 to the value of NUMRSPAIR for a better representation.
	 *
	 * For ETM v4.3 and later, 0x0 means 0, and no pairs are available -
	 * the default TRUE and FALSE resource selectors are omitted.
	 * Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
	 */
	drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
	if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
		drvdata->nr_resource += 1;
	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator controls for tracing. Read any status regs as these
	 * also contain RO capability data.
	 */
	drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->config.ss_status[i] =
			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
	}
	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
	drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
	drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);

	etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
	etm4_cs_lock(drvdata, csa);
	cpu_detect_trace_filtering(drvdata);
}

static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
	return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}

/* Set ELx trace filter access in the TRCVICTLR register */
static void etm4_set_victlr_access(struct etmv4_config *config)
{
	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
	config->vinst_ctrl |= etm4_get_victlr_access_type(config);
}

static void etm4_set_default_config(struct etmv4_config *config)
{
	/* disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* disable stalling */
	config->stall_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes, if available */
	config->syncfreq = 0xC;

	/* disable timestamp event */
	config->ts_ctrl = 0x0;

	/* TRCVICTLR::EVENT = 0x01, select the always on logic */
	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);

	/* TRCVICTLR::EXLEVEL_NS:EXLEVEL_S: Set kernel / user filtering */
	etm4_set_victlr_access(config);
}

static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
	u64 access_type = 0;

	/*
	 * EXLEVEL_NS, for NonSecure Exception levels.
	 * The mask here is a generic value and must be
	 * shifted to the corresponding field for the registers.
	 */
	if (!is_kernel_in_hyp_mode()) {
		/* Stay away from hypervisor mode for non-VHE */
		access_type = ETM_EXLEVEL_NS_HYP;
		if (config->mode & ETM_MODE_EXCL_KERN)
			access_type |= ETM_EXLEVEL_NS_OS;
	} else if (config->mode & ETM_MODE_EXCL_KERN) {
		access_type = ETM_EXLEVEL_NS_HYP;
	}

	if (config->mode & ETM_MODE_EXCL_USER)
		access_type |= ETM_EXLEVEL_NS_APP;

	return access_type;
}

/*
 * Construct the exception level masks for a given config.
 * This must be shifted to the corresponding register field
 * for usage.
 */
static u64 etm4_get_access_type(struct etmv4_config *config)
{
	/* All Secure exception levels are excluded from the trace */
	return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
}

static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
{
	return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
}

static void etm4_set_comparator_filter(struct etmv4_config *config,
				       u64 start, u64 stop, int comparator)
{
	u64 access_type = etm4_get_comparator_access_type(config);

	/* First half of default address comparator */
	config->addr_val[comparator] = start;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;

	/* Second half of default address comparator */
	config->addr_val[comparator + 1] = stop;
	config->addr_acc[comparator + 1] = access_type;
	config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;

	/*
	 * Configure the ViewInst function to include this address range
	 * comparator.
	 *
	 * @comparator is divided by two since it is the index in the
	 * etmv4_config::addr_val array but register TRCVIIECTLR deals with
	 * address range comparator _pairs_.
	 *
	 * Therefore:
	 *	index 0 -> comparator pair 0
	 *	index 2 -> comparator pair 1
	 *	index 4 -> comparator pair 2
	 *	...
	 *	index 14 -> comparator pair 7
	 */
	config->viiectlr |= BIT(comparator / 2);
}
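
/*
 * Worked example (for illustration only): for the second address-range
 * filter, etm4_get_next_comparator() returns index 2, so the range lands
 * in TRCACVR2/TRCACVR3 and BIT(1) is set in TRCVIIECTLR, i.e. address
 * range comparator pair 1 is included in the ViewInst function.
 */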

static void etm4_set_start_stop_filter(struct etmv4_config *config,
				       u64 address, int comparator,
				       enum etm_addr_type type)
{
	int shift;
	u64 access_type = etm4_get_comparator_access_type(config);

	/* Configure the comparator */
	config->addr_val[comparator] = address;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = type;

	/*
	 * Configure ViewInst Start-Stop control register.
	 * Addresses configured to start tracing go from bit 0 to n-1,
	 * while those configured to stop tracing from 16 to 16 + n-1.
	 */
	shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
	config->vissctlr |= BIT(shift + comparator);
}
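
/*
 * Worked example (for illustration only): a start address placed in
 * comparator 3 sets BIT(3) in TRCVISSCTLR, while a stop address in the
 * same comparator would set BIT(19), matching the start/stop halves of
 * the register described above.
 */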

static void etm4_set_default_filter(struct etmv4_config *config)
{
	/* Trace everything 'default' filter achieved by no filtering */
	config->viiectlr = 0x0;

	/*
	 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
	 * in the started state
	 */
	config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	config->mode |= ETM_MODE_VIEWINST_STARTSTOP;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
}

static void etm4_set_default(struct etmv4_config *config)
{
	if (WARN_ON_ONCE(!config))
		return;

	/*
	 * Make default initialisation trace everything
	 *
	 * This is done by a minimum default config sufficient to enable
	 * full instruction trace - with a default filter for trace all
	 * achieved by having no filtering.
	 */
	etm4_set_default_config(config);
	etm4_set_default_filter(config);
}

static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
{
	int nr_comparator, index = 0;
	struct etmv4_config *config = &drvdata->config;

	/*
	 * nr_addr_cmp holds the number of comparator _pairs_, so multiply
	 * by 2 for the total number of comparators.
	 */
	nr_comparator = drvdata->nr_addr_cmp * 2;

	/* Go through the tally of comparators looking for a free one. */
	while (index < nr_comparator) {
		switch (type) {
		case ETM_ADDR_TYPE_RANGE:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
			    config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
				return index;

			/* Address range comparators go in pairs */
			index += 2;
			break;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
				return index;

			/* Start/stop address can have odd indexes */
			index += 1;
			break;
		default:
			return -EINVAL;
		}
	}

	/* If we are here all the comparators have been used. */
	return -ENOSPC;
}

static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event)
{
	int i, comparator, ret = 0;
	u64 address;
	struct etmv4_config *config = &drvdata->config;
	struct etm_filters *filters = event->hw.addr_filters;

	if (!filters)
		goto default_filter;

	/* Sync events with what Perf got */
	perf_event_addr_filters_sync(event);

	/*
	 * If there are no filters to deal with simply go ahead with
	 * the default filter, i.e. the entire address range.
	 */
	if (!filters->nr_filters)
		goto default_filter;

	for (i = 0; i < filters->nr_filters; i++) {
		struct etm_filter *filter = &filters->etm_filter[i];
		enum etm_addr_type type = filter->type;

		/* See if a comparator is free. */
		comparator = etm4_get_next_comparator(drvdata, type);
		if (comparator < 0) {
			ret = comparator;
			goto out;
		}

		switch (type) {
		case ETM_ADDR_TYPE_RANGE:
			etm4_set_comparator_filter(config,
						   filter->start_addr,
						   filter->stop_addr,
						   comparator);
			/*
			 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
			 * in the started state
			 */
			config->vinst_ctrl |= TRCVICTLR_SSSTATUS;

			/* No start-stop filtering for ViewInst */
			config->vissctlr = 0x0;
			break;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			/* Get the right start or stop address */
			address = (type == ETM_ADDR_TYPE_START ?
				   filter->start_addr :
				   filter->stop_addr);

			/* Configure comparator */
			etm4_set_start_stop_filter(config, address,
						   comparator, type);

			/*
			 * If filters::ssstatus == 1, trace acquisition was
			 * started but the process was yanked away before the
			 * stop address was hit. As such the start/stop
			 * logic needs to be re-started so that tracing can
			 * resume where it left off.
			 *
			 * The start/stop logic status when a process is
			 * scheduled out is checked in function
			 * etm4_disable_perf().
			 */
			if (filters->ssstatus)
				config->vinst_ctrl |= TRCVICTLR_SSSTATUS;

			/* No include/exclude filtering for ViewInst */
			config->viiectlr = 0x0;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

	goto out;

default_filter:
	etm4_set_default_filter(config);

out:
	return ret;
}
1633
etm4_config_trace_mode(struct etmv4_config * config)1634 void etm4_config_trace_mode(struct etmv4_config *config)
1635 {
1636 u32 mode;
1637
1638 mode = config->mode;
1639 mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
1640
1641 /* excluding kernel AND user space doesn't make sense */
1642 WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
1643
1644 /* nothing to do if neither flags are set */
1645 if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
1646 return;
1647
1648 etm4_set_victlr_access(config);
1649 }
1650
etm4_online_cpu(unsigned int cpu)1651 static int etm4_online_cpu(unsigned int cpu)
1652 {
1653 if (!etmdrvdata[cpu])
1654 return etm4_probe_cpu(cpu);
1655
1656 if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
1657 coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
1658 return 0;
1659 }
1660
static int etm4_starting_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (!etmdrvdata[cpu]->os_unlock)
		etm4_os_unlock(etmdrvdata[cpu]);

	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm4_enable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

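/*
 * CPUHP "dying" callback: stop the trace unit before the CPU is taken
 * offline so no trace is generated while the core goes down.
 */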
static int etm4_dying_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (coresight_get_mode(etmdrvdata[cpu]->csdev))
		etm4_disable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

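/*
 * Save the self-hosted trace unit state ahead of a CPU power-down,
 * following section 3.4.1 ("The procedure when powering down the PE")
 * of ARM IHI 0064D: take the OS lock, wait for the programmers' model
 * to stabilise, then snapshot every implemented register.
 */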
static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
	int i, ret = 0;
	struct etmv4_save_state *state;
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa;
	struct device *etm_dev;

	if (WARN_ON(!csdev))
		return -ENODEV;

	etm_dev = &csdev->dev;
	csa = &csdev->access;

	/*
	 * As recommended by 3.4.1 ("The procedure when powering down the PE")
	 * of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

	etm4_cs_unlock(drvdata, csa);
	/* Lock the OS lock to disable trace and external debugger access */
	etm4_os_lock(drvdata);

	/* wait for TRCSTATR.PMSTABLE to go up */
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
		dev_err(etm_dev,
			"timeout while waiting for PM Stable Status\n");
		etm4_os_unlock(drvdata);
		ret = -EBUSY;
		goto out;
	}

	state = drvdata->save_state;

	state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
	if (drvdata->nr_pe)
		state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
	state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
	state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
	state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
	state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
	state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
	state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
	state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
	state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
	state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
	if (drvdata->q_filt)
		state->trcqctlr = etm4x_read32(csa, TRCQCTLR);

	state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
	state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
	state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);

	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));

	if (drvdata->nrseqstate) {
		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
	}
	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);

	for (i = 0; i < drvdata->nr_cntr; i++) {
		state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
		state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
		state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
		state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
	}

	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
		state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
	}

	/*
	 * Data trace stream is architecturally prohibited for A profile cores
	 * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
	 * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
	 * unit") of ARM IHI 0064D.
	 */

	for (i = 0; i < drvdata->numcidc; i++)
		state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));

	for (i = 0; i < drvdata->numvmidc; i++)
		state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));

	state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);

	state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);

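	/*
	 * A read of TRCCLAIMCLR returns the currently set claim tags; stash
	 * them so the restore path can re-assert them through TRCCLAIMSET.
	 */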
	state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);

	if (!drvdata->skip_power_up)
		state->trcpdcr = etm4x_read32(csa, TRCPDCR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");
		etm4_os_unlock(drvdata);
		ret = -EBUSY;
		goto out;
	}

	drvdata->state_needs_restore = true;

	/*
	 * Power can be removed from the trace unit now. We do this to
	 * potentially save power on systems that respect the TRCPDCR_PU
	 * despite requesting software to save/restore state.
	 */
	if (!drvdata->skip_power_up)
		etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
				      TRCPDCR);
out:
	etm4_cs_lock(drvdata, csa);
	return ret;
}

static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
	int ret = 0;

	/* Save the TRFCR irrespective of whether the ETM is ON */
	if (drvdata->trfcr)
		drvdata->save_trfcr = read_trfcr();
	/*
	 * Save and restore the ETM Trace registers only if
	 * the ETM is active.
	 */
	if (coresight_get_mode(drvdata->csdev) && drvdata->save_state)
		ret = __etm4_cpu_save(drvdata);
	return ret;
}

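/*
 * Restore the trace unit state saved by __etm4_cpu_save(), mirroring the
 * save sequence, then release the OS lock once the context is back in place.
 */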
static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
	int i;
	struct etmv4_save_state *state = drvdata->save_state;
	struct csdev_access *csa = &drvdata->csdev->access;

	if (WARN_ON(!drvdata->csdev))
		return;

	etm4_cs_unlock(drvdata, csa);
	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);

	etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
	if (drvdata->nr_pe)
		etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
	etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
	etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
	etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
	etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
	if (drvdata->stallctl)
		etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
	etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
	etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
	etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
	etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
	etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
	if (drvdata->q_filt)
		etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);

	etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
	etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
	etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
	if (drvdata->nr_pe_cmp)
		etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);

	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));

	if (drvdata->nrseqstate) {
		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
	}
	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);

	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
		etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
		etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
		etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
		if (etm4x_sspcicrn_present(drvdata, i))
			etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
	}

	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
		etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
	}

	for (i = 0; i < drvdata->numcidc; i++)
		etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));

	for (i = 0; i < drvdata->numvmidc; i++)
		etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));

	etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
	if (drvdata->numcidc > 4)
		etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);

	etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
	if (drvdata->numvmidc > 4)
		etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);

	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);

	if (!drvdata->skip_power_up)
		etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);

	drvdata->state_needs_restore = false;

	/*
	 * As recommended by section 4.3.7 ("Synchronization when using the
	 * memory-mapped interface") of ARM IHI 0064D
	 */
	dsb(sy);
	isb();

	/* Unlock the OS lock to re-enable trace and external debug access */
	etm4_os_unlock(drvdata);
	etm4_cs_lock(drvdata, csa);
}

static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
	if (drvdata->trfcr)
		write_trfcr(drvdata->save_trfcr);
	if (drvdata->state_needs_restore)
		__etm4_cpu_restore(drvdata);
}

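/*
 * CPU PM notifier: save the trace context before the core enters a
 * low-power state and restore it on the way out (or if entry failed).
 */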
static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			      void *v)
{
	struct etmv4_drvdata *drvdata;
	unsigned int cpu = smp_processor_id();

	if (!etmdrvdata[cpu])
		return NOTIFY_OK;

	drvdata = etmdrvdata[cpu];

	if (WARN_ON_ONCE(drvdata->cpu != cpu))
		return NOTIFY_BAD;

	switch (cmd) {
	case CPU_PM_ENTER:
		if (etm4_cpu_save(drvdata))
			return NOTIFY_BAD;
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		etm4_cpu_restore(drvdata);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_pm_nb = {
	.notifier_call = etm4_cpu_pm_notify,
};

/* Setup PM. Deals with error conditions and counts */
static int __init etm4_pm_setup(void)
{
	int ret;

	ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
					"arm/coresight4:starting",
					etm4_starting_cpu, etm4_dying_cpu);

	if (ret)
		goto unregister_notifier;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"arm/coresight4:online",
					etm4_online_cpu, NULL);

	/* HP dyn state ID returned in ret on success */
	if (ret > 0) {
		hp_online = ret;
		return 0;
	}

	/* failed dyn state - remove others */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);

unregister_notifier:
	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
	return ret;
}

static void etm4_pm_clear(void)
{
	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
	if (hp_online) {
		cpuhp_remove_state_nocalls(hp_online);
		hp_online = 0;
	}
}

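/*
 * Register the trace unit with the CoreSight core: build the device
 * description, hook it into the perf and syscfg infrastructure, and
 * publish the drvdata so the hotplug/PM callbacks can find it.
 */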
static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
{
	int ret;
	struct coresight_platform_data *pdata = NULL;
	struct device *dev = init_arg->dev;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct coresight_desc desc = { 0 };
	u8 major, minor;
	char *type_name;

	if (!drvdata)
		return -EINVAL;

	desc.access = *init_arg->csa;

	if (!drvdata->arch)
		return -EINVAL;

	major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
	minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);

	if (etm4x_is_ete(drvdata)) {
		type_name = "ete";
		/* ETE v1 has major version == 0b101. Adjust this for logging. */
		major -= 4;
	} else {
		type_name = "etm";
	}

	desc.name = devm_kasprintf(dev, GFP_KERNEL,
				   "%s%d", type_name, drvdata->cpu);
	if (!desc.name)
		return -ENOMEM;

	etm4_set_default(&drvdata->config);

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev->platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc.ops = &etm4_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	/* register with config infrastructure & load any current features */
	ret = etm4_cscfg_register(drvdata->csdev);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	etmdrvdata[drvdata->cpu] = drvdata;

	dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
		 drvdata->cpu, type_name, major, minor);

	if (boot_enable) {
		coresight_enable_sysfs(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;
}

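/*
 * Common probe path for both the AMBA and platform flavours: allocate
 * the save-state area if needed, discover the backing CPU, and read the
 * architecture data on that CPU. If the CPU is offline, the probe is
 * parked in the per-CPU delayed_probe slot and completed from the
 * online callback.
 */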
static int etm4_probe(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct csdev_access access = { 0 };
	struct etm4_init_arg init_arg = { 0 };
	struct etm4_init_arg *delayed;

	if (WARN_ON(!drvdata))
		return -ENOMEM;

	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;

	if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
		drvdata->save_state = devm_kmalloc(dev,
				sizeof(struct etmv4_save_state), GFP_KERNEL);
		if (!drvdata->save_state)
			return -ENOMEM;
	}

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	init_arg.dev = dev;
	init_arg.csa = &access;

	/*
	 * Serialize against CPUHP callbacks to avoid race condition
	 * between the smp call and saving the delayed probe.
	 */
	cpus_read_lock();
	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, &init_arg, 1)) {
		/* The CPU was offline, try again once it comes online. */
		delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);
		if (!delayed) {
			cpus_read_unlock();
			return -ENOMEM;
		}

		*delayed = init_arg;

		per_cpu(delayed_probe, drvdata->cpu) = delayed;

		cpus_read_unlock();
		return 0;
	}
	cpus_read_unlock();

	return etm4_add_coresight_dev(&init_arg);
}

static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
{
	struct etmv4_drvdata *drvdata;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct resource *res = &adev->res;
	int ret;

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->base = base;
	dev_set_drvdata(dev, drvdata);
	ret = etm4_probe(dev);
	if (!ret)
		pm_runtime_put(&adev->dev);

	return ret;
}

static int etm4_probe_platform_dev(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct etmv4_drvdata *drvdata;
	int ret;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
	if (IS_ERR(drvdata->pclk))
		return -ENODEV;

	if (res) {
		drvdata->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(drvdata->base)) {
			clk_put(drvdata->pclk);
			return PTR_ERR(drvdata->base);
		}
	}

	dev_set_drvdata(&pdev->dev, drvdata);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = etm4_probe(&pdev->dev);

	pm_runtime_put(&pdev->dev);
	if (ret)
		pm_runtime_disable(&pdev->dev);

	return ret;
}

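/*
 * Complete a probe that was deferred in etm4_probe() because the CPU
 * was offline at the time. Runs from the CPUHP online callback on the
 * CPU itself, so the architecture data can be read directly.
 */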
static int etm4_probe_cpu(unsigned int cpu)
{
	int ret;
	struct etm4_init_arg init_arg;
	struct csdev_access access = { 0 };
	struct etm4_init_arg *iap = *this_cpu_ptr(&delayed_probe);

	if (!iap)
		return 0;

	init_arg = *iap;
	devm_kfree(init_arg.dev, iap);
	*this_cpu_ptr(&delayed_probe) = NULL;

	ret = pm_runtime_resume_and_get(init_arg.dev);
	if (ret < 0) {
		dev_err(init_arg.dev, "Failed to get PM runtime!\n");
		return 0;
	}

	init_arg.csa = &access;
	etm4_init_arch_data(&init_arg);

	etm4_add_coresight_dev(&init_arg);

	pm_runtime_put(init_arg.dev);
	return 0;
}

static struct amba_cs_uci_id uci_id_etm4[] = {
	{
		/* ETMv4 UCI data */
		.devarch	= ETM_DEVARCH_ETMv4x_ARCH,
		.devarch_mask	= ETM_DEVARCH_ID_MASK,
		.devtype	= CS_DEVTYPE_PE_TRACE,
	}
};

static void clear_etmdrvdata(void *info)
{
	int cpu = *(int *)info;

	etmdrvdata[cpu] = NULL;
	per_cpu(delayed_probe, cpu) = NULL;
}

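/*
 * Tear-down common to both buses: clear the per-CPU references under the
 * hotplug lock, then unregister from perf, syscfg and the CoreSight core.
 */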
static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
	bool had_delayed_probe;
	/*
	 * Taking hotplug lock here to avoid racing between etm4_remove_dev()
	 * and CPU hotplug callbacks.
	 */
	cpus_read_lock();

	had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);

	/*
	 * The readers of etmdrvdata[] are CPU hotplug callbacks and PM
	 * notification callbacks. Changing etmdrvdata[i] on CPU i ensures
	 * each of these callbacks sees a consistent view within a single
	 * invocation.
	 */
	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
		clear_etmdrvdata(&drvdata->cpu);

	cpus_read_unlock();

	if (!had_delayed_probe) {
		etm_perf_symlink(drvdata->csdev, false);
		cscfg_unregister_csdev(drvdata->csdev);
		coresight_unregister(drvdata->csdev);
	}
}

static void etm4_remove_amba(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	if (drvdata)
		etm4_remove_dev(drvdata);
}

static void etm4_remove_platform_dev(struct platform_device *pdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

	if (drvdata)
		etm4_remove_dev(drvdata);
	pm_runtime_disable(&pdev->dev);

	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
		clk_put(drvdata->pclk);
}

static const struct amba_id etm4_ids[] = {
	CS_AMBA_ID(0x000bb95d),			/* Cortex-A53 */
	CS_AMBA_ID(0x000bb95e),			/* Cortex-A57 */
	CS_AMBA_ID(0x000bb95a),			/* Cortex-A72 */
	CS_AMBA_ID(0x000bb959),			/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
	CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
	CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
	CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
	CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
	CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
	CS_AMBA_UCI_ID(0x000bbd0d, uci_id_etm4),/* Qualcomm Kryo 5XX Cortex-A77 */
	CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
	CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
	CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
	/*
	 * Match all PIDs with ETM4 DEVARCH. No need for adding any of the new
	 * CPUs to the list here.
	 */
	CS_AMBA_MATCH_ALL_UCI(uci_id_etm4),
	{},
};

MODULE_DEVICE_TABLE(amba, etm4_ids);

static struct amba_driver etm4x_amba_driver = {
	.drv = {
		.name		= "coresight-etm4x",
		.suppress_bind_attrs = true,
	},
	.probe		= etm4_probe_amba,
	.remove		= etm4_remove_amba,
	.id_table	= etm4_ids,
};

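/*
 * Runtime PM hooks: only the platform (non-AMBA) devices acquire an APB
 * clock in this driver, so suspend/resume simply gate drvdata->pclk.
 */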
#ifdef CONFIG_PM
static int etm4_runtime_suspend(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
		clk_disable_unprepare(drvdata->pclk);

	return 0;
}

static int etm4_runtime_resume(struct device *dev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
		clk_prepare_enable(drvdata->pclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm4_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm4_runtime_suspend, etm4_runtime_resume, NULL)
};

static const struct of_device_id etm4_sysreg_match[] = {
	{ .compatible	= "arm,coresight-etm4x-sysreg" },
	{ .compatible	= "arm,embedded-trace-extension" },
	{}
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id etm4x_acpi_ids[] = {
	{"ARMHC500", 0, 0, 0}, /* ARM CoreSight ETM4x */
	{}
};
MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
#endif

static struct platform_driver etm4_platform_driver = {
	.probe		= etm4_probe_platform_dev,
	.remove		= etm4_remove_platform_dev,
	.driver		= {
		.name			= "coresight-etm4x",
		.of_match_table		= etm4_sysreg_match,
		.acpi_match_table	= ACPI_PTR(etm4x_acpi_ids),
		.suppress_bind_attrs	= true,
		.pm			= &etm4_dev_pm_ops,
	},
};

static int __init etm4x_init(void)
{
	int ret;

	ret = etm4_pm_setup();

	/* etm4_pm_setup() does its own cleanup - exit on error */
	if (ret)
		return ret;

	ret = amba_driver_register(&etm4x_amba_driver);
	if (ret) {
		pr_err("Error registering etm4x AMBA driver\n");
		goto clear_pm;
	}

	ret = platform_driver_register(&etm4_platform_driver);
	if (!ret)
		return 0;

	pr_err("Error registering etm4x platform driver\n");
	amba_driver_unregister(&etm4x_amba_driver);

clear_pm:
	etm4_pm_clear();
	return ret;
}

static void __exit etm4x_exit(void)
{
	amba_driver_unregister(&etm4x_amba_driver);
	platform_driver_unregister(&etm4_platform_driver);
	etm4_pm_clear();
}

module_init(etm4x_init);
module_exit(etm4x_exit);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace v4.x driver");
MODULE_LICENSE("GPL v2");