xref: /linux/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c (revision e7e2296b0ecf9b6e934f7a1118cee91d4d486a84)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/coresight.h>
9 #include <linux/pid_namespace.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/sysfs.h>
12 #include "coresight-etm4x.h"
13 #include "coresight-priv.h"
14 #include "coresight-syscfg.h"
15 
/*
 * etm4_set_mode_exclude - program the ViewInst include/exclude control
 * bits for the address range comparator pair currently selected by
 * config->addr_idx.
 *
 * @drvdata: device data, config->addr_idx selects the comparator pair
 * @exclude: true to exclude the range from trace, false to include it
 *
 * Only acts when the selected comparator performs an instruction address
 * comparison (TRCACATRn.TYPE == ADDR); other comparator types are left
 * untouched and 0 is returned.
 *
 * Returns 0 on success, -EINVAL if the index is odd (pairs start on even
 * indices) or the pair is not programmed as ETM_ADDR_TYPE_RANGE.
 *
 * NOTE(review): caller is expected to hold drvdata->spinlock (see
 * addr_range_store()) — confirm for any new caller.
 */
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
		/* Range comparators come in even/odd pairs */
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
58 
59 static ssize_t nr_pe_cmp_show(struct device *dev,
60 			      struct device_attribute *attr,
61 			      char *buf)
62 {
63 	unsigned long val;
64 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
65 
66 	val = drvdata->nr_pe_cmp;
67 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
68 }
69 static DEVICE_ATTR_RO(nr_pe_cmp);
70 
71 static ssize_t nr_addr_cmp_show(struct device *dev,
72 				struct device_attribute *attr,
73 				char *buf)
74 {
75 	unsigned long val;
76 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
77 
78 	val = drvdata->nr_addr_cmp;
79 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
80 }
81 static DEVICE_ATTR_RO(nr_addr_cmp);
82 
83 static ssize_t nr_cntr_show(struct device *dev,
84 			    struct device_attribute *attr,
85 			    char *buf)
86 {
87 	unsigned long val;
88 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
89 
90 	val = drvdata->nr_cntr;
91 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
92 }
93 static DEVICE_ATTR_RO(nr_cntr);
94 
95 static ssize_t nr_ext_inp_show(struct device *dev,
96 			       struct device_attribute *attr,
97 			       char *buf)
98 {
99 	unsigned long val;
100 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
101 
102 	val = drvdata->nr_ext_inp;
103 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
104 }
105 static DEVICE_ATTR_RO(nr_ext_inp);
106 
107 static ssize_t numcidc_show(struct device *dev,
108 			    struct device_attribute *attr,
109 			    char *buf)
110 {
111 	unsigned long val;
112 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
113 
114 	val = drvdata->numcidc;
115 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
116 }
117 static DEVICE_ATTR_RO(numcidc);
118 
119 static ssize_t numvmidc_show(struct device *dev,
120 			     struct device_attribute *attr,
121 			     char *buf)
122 {
123 	unsigned long val;
124 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
125 
126 	val = drvdata->numvmidc;
127 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
128 }
129 static DEVICE_ATTR_RO(numvmidc);
130 
131 static ssize_t nrseqstate_show(struct device *dev,
132 			       struct device_attribute *attr,
133 			       char *buf)
134 {
135 	unsigned long val;
136 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
137 
138 	val = drvdata->nrseqstate;
139 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
140 }
141 static DEVICE_ATTR_RO(nrseqstate);
142 
143 static ssize_t nr_resource_show(struct device *dev,
144 				struct device_attribute *attr,
145 				char *buf)
146 {
147 	unsigned long val;
148 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
149 
150 	val = drvdata->nr_resource;
151 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
152 }
153 static DEVICE_ATTR_RO(nr_resource);
154 
155 static ssize_t nr_ss_cmp_show(struct device *dev,
156 			      struct device_attribute *attr,
157 			      char *buf)
158 {
159 	unsigned long val;
160 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
161 
162 	val = drvdata->nr_ss_cmp;
163 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
164 }
165 static DEVICE_ATTR_RO(nr_ss_cmp);
166 
/*
 * reset_store - return the sysfs trace configuration to its defaults.
 *
 * Any valid hex write triggers the reset; a non-zero value additionally
 * clears config->mode. All filtering, event, counter, sequencer,
 * comparator and context/VMID state is reprogrammed to reset defaults
 * under drvdata->spinlock, then the trace ID is released (sysfs mode
 * only) and any complex-config features on the device are reset.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	raw_spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period  to 2^8 = 256 bytes*/
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
	if (drvdata->nr_addr_cmp > 0) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
	config->vipcssctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	/* Clear counter selection and zero all counter state */
	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	/* Resources 0 and 1 are fixed; only clear the programmable ones */
	config->res_idx = 0x0;
	for (i = 2; i < 2 * drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	/* Clear single-shot comparator control and PE comparator inputs */
	config->ss_idx = 0x0;
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	/* Two comparators per address range: clear value, access and type */
	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	/* Clear context ID comparator values and masks */
	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	/* Clear VMID comparator values and masks */
	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	raw_spin_unlock(&drvdata->spinlock);

	/* for sysfs - only release trace id when resetting */
	etm4_release_trace_id(drvdata);

	cscfg_csdev_reset_feats(to_coresight_device(dev));

	return size;
}
static DEVICE_ATTR_WO(reset);
281 
282 static ssize_t mode_show(struct device *dev,
283 			 struct device_attribute *attr,
284 			 char *buf)
285 {
286 	unsigned long val;
287 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
288 	struct etmv4_config *config = &drvdata->config;
289 
290 	val = config->mode;
291 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
292 }
293 
/*
 * mode_store - accept a user supplied hex mode word and translate the
 * ETM_MODE_* / ETMv4_MODE_* flags into the corresponding TRCCONFIGR,
 * TRCEVENTCTL1R, TRCSTALLCTLR and TRCVICTLR fields of the cached
 * configuration. Requests for features this implementation does not
 * advertise in drvdata (trcbb, trccci, q_support, ...) are silently
 * dropped rather than rejected. The whole translation runs under
 * drvdata->spinlock.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	raw_spin_lock(&drvdata->spinlock);
	/* Only bits defined in ETMv4_MODE_ALL are retained */
	config->mode = val & ETMv4_MODE_ALL;

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= TRCCONFIGR_BB;
	else
		config->cfg &= ~TRCCONFIGR_BB;

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
		(drvdata->trccci == true))
		config->cfg |= TRCCONFIGR_CCI;
	else
		config->cfg &= ~TRCCONFIGR_CCI;

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= TRCCONFIGR_CID;
	else
		config->cfg &= ~TRCCONFIGR_CID;

	/* bit[7], Virtual context identifier tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= TRCCONFIGR_VMID;
	else
		config->cfg &= ~TRCCONFIGR_VMID;

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~TRCCONFIGR_COND_MASK;
		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= TRCCONFIGR_TS;
	else
		config->cfg &= ~TRCCONFIGR_TS;

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
					(drvdata->retstack == true))
		config->cfg |= TRCCONFIGR_RS;
	else
		config->cfg &= ~TRCCONFIGR_RS;

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
	/*
	 * if supported, Q elements with instruction counts are enabled.
	 * Always set the low bit for any requested mode. Valid combos are
	 * 0b00, 0b01 and 0b11.
	 */
	if (mode && drvdata->q_support)
		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
	else
		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
	else
		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;

	/* bit[8], Instruction stall bit */
	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
		(drvdata->nooverflow == true))
		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
	else
		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
	else
		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
	else
		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
		(drvdata->trc_error == true))
		config->vinst_ctrl |= TRCVICTLR_TRCERR;
	else
		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;

	/* Kernel/user exclusion reprograms the exception level filters */
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	raw_spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
447 
448 static ssize_t pe_show(struct device *dev,
449 		       struct device_attribute *attr,
450 		       char *buf)
451 {
452 	unsigned long val;
453 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
454 	struct etmv4_config *config = &drvdata->config;
455 
456 	val = config->pe_sel;
457 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
458 }
459 
460 static ssize_t pe_store(struct device *dev,
461 			struct device_attribute *attr,
462 			const char *buf, size_t size)
463 {
464 	unsigned long val;
465 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
466 	struct etmv4_config *config = &drvdata->config;
467 
468 	if (kstrtoul(buf, 16, &val))
469 		return -EINVAL;
470 
471 	raw_spin_lock(&drvdata->spinlock);
472 	if (val > drvdata->nr_pe) {
473 		raw_spin_unlock(&drvdata->spinlock);
474 		return -EINVAL;
475 	}
476 
477 	config->pe_sel = val;
478 	raw_spin_unlock(&drvdata->spinlock);
479 	return size;
480 }
481 static DEVICE_ATTR_RW(pe);
482 
483 static ssize_t event_show(struct device *dev,
484 			  struct device_attribute *attr,
485 			  char *buf)
486 {
487 	unsigned long val;
488 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
489 	struct etmv4_config *config = &drvdata->config;
490 
491 	val = config->eventctrl0;
492 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
493 }
494 
495 static ssize_t event_store(struct device *dev,
496 			   struct device_attribute *attr,
497 			   const char *buf, size_t size)
498 {
499 	unsigned long val;
500 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
501 	struct etmv4_config *config = &drvdata->config;
502 
503 	if (kstrtoul(buf, 16, &val))
504 		return -EINVAL;
505 
506 	raw_spin_lock(&drvdata->spinlock);
507 	switch (drvdata->nr_event) {
508 	case 0x0:
509 		/* EVENT0, bits[7:0] */
510 		config->eventctrl0 = val & 0xFF;
511 		break;
512 	case 0x1:
513 		 /* EVENT1, bits[15:8] */
514 		config->eventctrl0 = val & 0xFFFF;
515 		break;
516 	case 0x2:
517 		/* EVENT2, bits[23:16] */
518 		config->eventctrl0 = val & 0xFFFFFF;
519 		break;
520 	case 0x3:
521 		/* EVENT3, bits[31:24] */
522 		config->eventctrl0 = val;
523 		break;
524 	default:
525 		break;
526 	}
527 	raw_spin_unlock(&drvdata->spinlock);
528 	return size;
529 }
530 static DEVICE_ATTR_RW(event);
531 
532 static ssize_t event_instren_show(struct device *dev,
533 				  struct device_attribute *attr,
534 				  char *buf)
535 {
536 	unsigned long val;
537 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
538 	struct etmv4_config *config = &drvdata->config;
539 
540 	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
541 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
542 }
543 
/*
 * event_instren_store - select which events generate Event elements in
 * the instruction trace, clipped to the number of events implemented
 * (drvdata->nr_event). The INSTEN field is fully cleared first, then
 * only the bits valid for this implementation are accepted from the
 * user value.
 */
static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	raw_spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
	switch (drvdata->nr_event) {
	case 0x0:
		/*
		 * generate Event element for event 1
		 * NOTE(review): this accepts INSTEN_1 rather than
		 * INSTEN_0 for the single-event case — confirm against
		 * the TRCEVENTCTL1R bit assignments.
		 */
		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
					     TRCEVENTCTL1R_INSTEN_1 |
					     TRCEVENTCTL1R_INSTEN_2);
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
					     TRCEVENTCTL1R_INSTEN_1 |
					     TRCEVENTCTL1R_INSTEN_2 |
					     TRCEVENTCTL1R_INSTEN_3);
		break;
	default:
		/* nr_event > 3: leave all INSTEN bits cleared */
		break;
	}
	raw_spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);
587 
588 static ssize_t event_ts_show(struct device *dev,
589 			     struct device_attribute *attr,
590 			     char *buf)
591 {
592 	unsigned long val;
593 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 	struct etmv4_config *config = &drvdata->config;
595 
596 	val = config->ts_ctrl;
597 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
598 }
599 
600 static ssize_t event_ts_store(struct device *dev,
601 			      struct device_attribute *attr,
602 			      const char *buf, size_t size)
603 {
604 	unsigned long val;
605 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
606 	struct etmv4_config *config = &drvdata->config;
607 
608 	if (kstrtoul(buf, 16, &val))
609 		return -EINVAL;
610 	if (!drvdata->ts_size)
611 		return -EINVAL;
612 
613 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
614 	return size;
615 }
616 static DEVICE_ATTR_RW(event_ts);
617 
618 static ssize_t syncfreq_show(struct device *dev,
619 			     struct device_attribute *attr,
620 			     char *buf)
621 {
622 	unsigned long val;
623 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 	struct etmv4_config *config = &drvdata->config;
625 
626 	val = config->syncfreq;
627 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
628 }
629 
630 static ssize_t syncfreq_store(struct device *dev,
631 			      struct device_attribute *attr,
632 			      const char *buf, size_t size)
633 {
634 	unsigned long val;
635 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
636 	struct etmv4_config *config = &drvdata->config;
637 
638 	if (kstrtoul(buf, 16, &val))
639 		return -EINVAL;
640 	if (drvdata->syncpr == true)
641 		return -EINVAL;
642 
643 	config->syncfreq = val & ETMv4_SYNC_MASK;
644 	return size;
645 }
646 static DEVICE_ATTR_RW(syncfreq);
647 
648 static ssize_t cyc_threshold_show(struct device *dev,
649 				  struct device_attribute *attr,
650 				  char *buf)
651 {
652 	unsigned long val;
653 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 	struct etmv4_config *config = &drvdata->config;
655 
656 	val = config->ccctlr;
657 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
658 }
659 
660 static ssize_t cyc_threshold_store(struct device *dev,
661 				   struct device_attribute *attr,
662 				   const char *buf, size_t size)
663 {
664 	unsigned long val;
665 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
666 	struct etmv4_config *config = &drvdata->config;
667 
668 	if (kstrtoul(buf, 16, &val))
669 		return -EINVAL;
670 
671 	/* mask off max threshold before checking min value */
672 	val &= ETM_CYC_THRESHOLD_MASK;
673 	if (val < drvdata->ccitmin)
674 		return -EINVAL;
675 
676 	config->ccctlr = val;
677 	return size;
678 }
679 static DEVICE_ATTR_RW(cyc_threshold);
680 
681 static ssize_t bb_ctrl_show(struct device *dev,
682 			    struct device_attribute *attr,
683 			    char *buf)
684 {
685 	unsigned long val;
686 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
687 	struct etmv4_config *config = &drvdata->config;
688 
689 	val = config->bb_ctrl;
690 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
691 }
692 
693 static ssize_t bb_ctrl_store(struct device *dev,
694 			     struct device_attribute *attr,
695 			     const char *buf, size_t size)
696 {
697 	unsigned long val;
698 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
699 	struct etmv4_config *config = &drvdata->config;
700 
701 	if (kstrtoul(buf, 16, &val))
702 		return -EINVAL;
703 	if (drvdata->trcbb == false)
704 		return -EINVAL;
705 	if (!drvdata->nr_addr_cmp)
706 		return -EINVAL;
707 
708 	/*
709 	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
710 	 * individual range comparators. If include then at least 1
711 	 * range must be selected.
712 	 */
713 	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
714 		return -EINVAL;
715 
716 	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
717 	return size;
718 }
719 static DEVICE_ATTR_RW(bb_ctrl);
720 
721 static ssize_t event_vinst_show(struct device *dev,
722 				struct device_attribute *attr,
723 				char *buf)
724 {
725 	unsigned long val;
726 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
727 	struct etmv4_config *config = &drvdata->config;
728 
729 	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
730 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
731 }
732 
733 static ssize_t event_vinst_store(struct device *dev,
734 				 struct device_attribute *attr,
735 				 const char *buf, size_t size)
736 {
737 	unsigned long val;
738 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
739 	struct etmv4_config *config = &drvdata->config;
740 
741 	if (kstrtoul(buf, 16, &val))
742 		return -EINVAL;
743 
744 	raw_spin_lock(&drvdata->spinlock);
745 	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
746 	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
747 	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
748 	raw_spin_unlock(&drvdata->spinlock);
749 	return size;
750 }
751 static DEVICE_ATTR_RW(event_vinst);
752 
753 static ssize_t s_exlevel_vinst_show(struct device *dev,
754 				    struct device_attribute *attr,
755 				    char *buf)
756 {
757 	unsigned long val;
758 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
759 	struct etmv4_config *config = &drvdata->config;
760 
761 	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
762 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
763 }
764 
765 static ssize_t s_exlevel_vinst_store(struct device *dev,
766 				     struct device_attribute *attr,
767 				     const char *buf, size_t size)
768 {
769 	unsigned long val;
770 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
771 	struct etmv4_config *config = &drvdata->config;
772 
773 	if (kstrtoul(buf, 16, &val))
774 		return -EINVAL;
775 
776 	raw_spin_lock(&drvdata->spinlock);
777 	/* clear all EXLEVEL_S bits  */
778 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
779 	/* enable instruction tracing for corresponding exception level */
780 	val &= drvdata->s_ex_level;
781 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
782 	raw_spin_unlock(&drvdata->spinlock);
783 	return size;
784 }
785 static DEVICE_ATTR_RW(s_exlevel_vinst);
786 
787 static ssize_t ns_exlevel_vinst_show(struct device *dev,
788 				     struct device_attribute *attr,
789 				     char *buf)
790 {
791 	unsigned long val;
792 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
793 	struct etmv4_config *config = &drvdata->config;
794 
795 	/* EXLEVEL_NS, bits[23:20] */
796 	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
797 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
798 }
799 
800 static ssize_t ns_exlevel_vinst_store(struct device *dev,
801 				      struct device_attribute *attr,
802 				      const char *buf, size_t size)
803 {
804 	unsigned long val;
805 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
806 	struct etmv4_config *config = &drvdata->config;
807 
808 	if (kstrtoul(buf, 16, &val))
809 		return -EINVAL;
810 
811 	raw_spin_lock(&drvdata->spinlock);
812 	/* clear EXLEVEL_NS bits  */
813 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
814 	/* enable instruction tracing for corresponding exception level */
815 	val &= drvdata->ns_ex_level;
816 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
817 	raw_spin_unlock(&drvdata->spinlock);
818 	return size;
819 }
820 static DEVICE_ATTR_RW(ns_exlevel_vinst);
821 
822 static ssize_t addr_idx_show(struct device *dev,
823 			     struct device_attribute *attr,
824 			     char *buf)
825 {
826 	unsigned long val;
827 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
828 	struct etmv4_config *config = &drvdata->config;
829 
830 	val = config->addr_idx;
831 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
832 }
833 
834 static ssize_t addr_idx_store(struct device *dev,
835 			      struct device_attribute *attr,
836 			      const char *buf, size_t size)
837 {
838 	unsigned long val;
839 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
840 	struct etmv4_config *config = &drvdata->config;
841 
842 	if (kstrtoul(buf, 16, &val))
843 		return -EINVAL;
844 	if (val >= drvdata->nr_addr_cmp * 2)
845 		return -EINVAL;
846 
847 	/*
848 	 * Use spinlock to ensure index doesn't change while it gets
849 	 * dereferenced multiple times within a spinlock block elsewhere.
850 	 */
851 	raw_spin_lock(&drvdata->spinlock);
852 	config->addr_idx = val;
853 	raw_spin_unlock(&drvdata->spinlock);
854 	return size;
855 }
856 static DEVICE_ATTR_RW(addr_idx);
857 
858 static ssize_t addr_instdatatype_show(struct device *dev,
859 				      struct device_attribute *attr,
860 				      char *buf)
861 {
862 	ssize_t len;
863 	u8 val, idx;
864 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
865 	struct etmv4_config *config = &drvdata->config;
866 
867 	raw_spin_lock(&drvdata->spinlock);
868 	idx = config->addr_idx;
869 	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
870 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
871 			val == TRCACATRn_TYPE_ADDR ? "instr" :
872 			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
873 			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
874 			"data_load_store")));
875 	raw_spin_unlock(&drvdata->spinlock);
876 	return len;
877 }
878 
879 static ssize_t addr_instdatatype_store(struct device *dev,
880 				       struct device_attribute *attr,
881 				       const char *buf, size_t size)
882 {
883 	u8 idx;
884 	char str[20] = "";
885 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
886 	struct etmv4_config *config = &drvdata->config;
887 
888 	if (strlen(buf) >= 20)
889 		return -EINVAL;
890 	if (sscanf(buf, "%s", str) != 1)
891 		return -EINVAL;
892 
893 	raw_spin_lock(&drvdata->spinlock);
894 	idx = config->addr_idx;
895 	if (!strcmp(str, "instr"))
896 		/* TYPE, bits[1:0] */
897 		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
898 
899 	raw_spin_unlock(&drvdata->spinlock);
900 	return size;
901 }
902 static DEVICE_ATTR_RW(addr_instdatatype);
903 
904 static ssize_t addr_single_show(struct device *dev,
905 				struct device_attribute *attr,
906 				char *buf)
907 {
908 	u8 idx;
909 	unsigned long val;
910 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
911 	struct etmv4_config *config = &drvdata->config;
912 
913 	idx = config->addr_idx;
914 	raw_spin_lock(&drvdata->spinlock);
915 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
916 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
917 		raw_spin_unlock(&drvdata->spinlock);
918 		return -EPERM;
919 	}
920 	val = (unsigned long)config->addr_val[idx];
921 	raw_spin_unlock(&drvdata->spinlock);
922 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
923 }
924 
925 static ssize_t addr_single_store(struct device *dev,
926 				 struct device_attribute *attr,
927 				 const char *buf, size_t size)
928 {
929 	u8 idx;
930 	unsigned long val;
931 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
932 	struct etmv4_config *config = &drvdata->config;
933 
934 	if (kstrtoul(buf, 16, &val))
935 		return -EINVAL;
936 
937 	raw_spin_lock(&drvdata->spinlock);
938 	idx = config->addr_idx;
939 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
940 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
941 		raw_spin_unlock(&drvdata->spinlock);
942 		return -EPERM;
943 	}
944 
945 	config->addr_val[idx] = (u64)val;
946 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
947 	raw_spin_unlock(&drvdata->spinlock);
948 	return size;
949 }
950 static DEVICE_ATTR_RW(addr_single);
951 
952 static ssize_t addr_range_show(struct device *dev,
953 			       struct device_attribute *attr,
954 			       char *buf)
955 {
956 	u8 idx;
957 	unsigned long val1, val2;
958 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
959 	struct etmv4_config *config = &drvdata->config;
960 
961 	raw_spin_lock(&drvdata->spinlock);
962 	idx = config->addr_idx;
963 	if (idx % 2 != 0) {
964 		raw_spin_unlock(&drvdata->spinlock);
965 		return -EPERM;
966 	}
967 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
968 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
969 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
970 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
971 		raw_spin_unlock(&drvdata->spinlock);
972 		return -EPERM;
973 	}
974 
975 	val1 = (unsigned long)config->addr_val[idx];
976 	val2 = (unsigned long)config->addr_val[idx + 1];
977 	raw_spin_unlock(&drvdata->spinlock);
978 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
979 }
980 
/*
 * addr_range_store - program the selected even/odd comparator pair as an
 * address range: "<low> <high> [exclude]". The optional third value
 * selects include (0) or exclude (non-zero); when absent it defaults to
 * the ETM_MODE_EXCLUDE bit of config->mode. The ViewInst
 * include/exclude bits are then updated via etm4_set_mode_exclude()
 * under the same lock.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int elements, exclude;

	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);

	/*  exclude is optional, but need at least two parameter */
	if (elements < 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	raw_spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* ranges are programmed on the even half of a comparator pair */
	if (idx % 2 != 0) {
		raw_spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	/* both halves must be unused, or already a range being updated */
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		raw_spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 * use supplied value, or default to bit set in 'mode'
	 */
	if (elements != 3)
		exclude = config->mode & ETM_MODE_EXCLUDE;
	etm4_set_mode_exclude(drvdata, exclude ? true : false);

	raw_spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
1032 
1033 static ssize_t addr_start_show(struct device *dev,
1034 			       struct device_attribute *attr,
1035 			       char *buf)
1036 {
1037 	u8 idx;
1038 	unsigned long val;
1039 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1040 	struct etmv4_config *config = &drvdata->config;
1041 
1042 	raw_spin_lock(&drvdata->spinlock);
1043 	idx = config->addr_idx;
1044 
1045 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1046 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1047 		raw_spin_unlock(&drvdata->spinlock);
1048 		return -EPERM;
1049 	}
1050 
1051 	val = (unsigned long)config->addr_val[idx];
1052 	raw_spin_unlock(&drvdata->spinlock);
1053 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1054 }
1055 
1056 static ssize_t addr_start_store(struct device *dev,
1057 				struct device_attribute *attr,
1058 				const char *buf, size_t size)
1059 {
1060 	u8 idx;
1061 	unsigned long val;
1062 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1063 	struct etmv4_config *config = &drvdata->config;
1064 
1065 	if (kstrtoul(buf, 16, &val))
1066 		return -EINVAL;
1067 
1068 	raw_spin_lock(&drvdata->spinlock);
1069 	idx = config->addr_idx;
1070 	if (!drvdata->nr_addr_cmp) {
1071 		raw_spin_unlock(&drvdata->spinlock);
1072 		return -EINVAL;
1073 	}
1074 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1075 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1076 		raw_spin_unlock(&drvdata->spinlock);
1077 		return -EPERM;
1078 	}
1079 
1080 	config->addr_val[idx] = (u64)val;
1081 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1082 	config->vissctlr |= BIT(idx);
1083 	raw_spin_unlock(&drvdata->spinlock);
1084 	return size;
1085 }
1086 static DEVICE_ATTR_RW(addr_start);
1087 
1088 static ssize_t addr_stop_show(struct device *dev,
1089 			      struct device_attribute *attr,
1090 			      char *buf)
1091 {
1092 	u8 idx;
1093 	unsigned long val;
1094 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1095 	struct etmv4_config *config = &drvdata->config;
1096 
1097 	raw_spin_lock(&drvdata->spinlock);
1098 	idx = config->addr_idx;
1099 
1100 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1101 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1102 		raw_spin_unlock(&drvdata->spinlock);
1103 		return -EPERM;
1104 	}
1105 
1106 	val = (unsigned long)config->addr_val[idx];
1107 	raw_spin_unlock(&drvdata->spinlock);
1108 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1109 }
1110 
1111 static ssize_t addr_stop_store(struct device *dev,
1112 			       struct device_attribute *attr,
1113 			       const char *buf, size_t size)
1114 {
1115 	u8 idx;
1116 	unsigned long val;
1117 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1118 	struct etmv4_config *config = &drvdata->config;
1119 
1120 	if (kstrtoul(buf, 16, &val))
1121 		return -EINVAL;
1122 
1123 	raw_spin_lock(&drvdata->spinlock);
1124 	idx = config->addr_idx;
1125 	if (!drvdata->nr_addr_cmp) {
1126 		raw_spin_unlock(&drvdata->spinlock);
1127 		return -EINVAL;
1128 	}
1129 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1130 	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1131 		raw_spin_unlock(&drvdata->spinlock);
1132 		return -EPERM;
1133 	}
1134 
1135 	config->addr_val[idx] = (u64)val;
1136 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1137 	config->vissctlr |= BIT(idx + 16);
1138 	raw_spin_unlock(&drvdata->spinlock);
1139 	return size;
1140 }
1141 static DEVICE_ATTR_RW(addr_stop);
1142 
1143 static ssize_t addr_ctxtype_show(struct device *dev,
1144 				 struct device_attribute *attr,
1145 				 char *buf)
1146 {
1147 	ssize_t len;
1148 	u8 idx, val;
1149 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1150 	struct etmv4_config *config = &drvdata->config;
1151 
1152 	raw_spin_lock(&drvdata->spinlock);
1153 	idx = config->addr_idx;
1154 	/* CONTEXTTYPE, bits[3:2] */
1155 	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1156 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1157 			(val == ETM_CTX_CTXID ? "ctxid" :
1158 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1159 	raw_spin_unlock(&drvdata->spinlock);
1160 	return len;
1161 }
1162 
/*
 * Set the context comparison type (none/ctxid/vmid/all) of the address
 * comparator currently selected by config->addr_idx.  The requested type
 * is only applied when the matching comparators are implemented
 * (numcidc/numvmidc); unrecognised strings are silently ignored.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that would not fit in str[] with its terminator */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	raw_spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
	}
	raw_spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
1208 
1209 static ssize_t addr_context_show(struct device *dev,
1210 				 struct device_attribute *attr,
1211 				 char *buf)
1212 {
1213 	u8 idx;
1214 	unsigned long val;
1215 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1216 	struct etmv4_config *config = &drvdata->config;
1217 
1218 	raw_spin_lock(&drvdata->spinlock);
1219 	idx = config->addr_idx;
1220 	/* context ID comparator bits[6:4] */
1221 	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1222 	raw_spin_unlock(&drvdata->spinlock);
1223 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1224 }
1225 
1226 static ssize_t addr_context_store(struct device *dev,
1227 				  struct device_attribute *attr,
1228 				  const char *buf, size_t size)
1229 {
1230 	u8 idx;
1231 	unsigned long val;
1232 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1233 	struct etmv4_config *config = &drvdata->config;
1234 
1235 	if (kstrtoul(buf, 16, &val))
1236 		return -EINVAL;
1237 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1238 		return -EINVAL;
1239 	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1240 		     drvdata->numcidc : drvdata->numvmidc))
1241 		return -EINVAL;
1242 
1243 	raw_spin_lock(&drvdata->spinlock);
1244 	idx = config->addr_idx;
1245 	/* clear context ID comparator bits[6:4] */
1246 	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1247 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1248 	raw_spin_unlock(&drvdata->spinlock);
1249 	return size;
1250 }
1251 static DEVICE_ATTR_RW(addr_context);
1252 
1253 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1254 				      struct device_attribute *attr,
1255 				      char *buf)
1256 {
1257 	u8 idx;
1258 	unsigned long val;
1259 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1260 	struct etmv4_config *config = &drvdata->config;
1261 
1262 	raw_spin_lock(&drvdata->spinlock);
1263 	idx = config->addr_idx;
1264 	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1265 	raw_spin_unlock(&drvdata->spinlock);
1266 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1267 }
1268 
1269 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1270 				       struct device_attribute *attr,
1271 				       const char *buf, size_t size)
1272 {
1273 	u8 idx;
1274 	unsigned long val;
1275 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1276 	struct etmv4_config *config = &drvdata->config;
1277 
1278 	if (kstrtoul(buf, 0, &val))
1279 		return -EINVAL;
1280 
1281 	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1282 		return -EINVAL;
1283 
1284 	raw_spin_lock(&drvdata->spinlock);
1285 	idx = config->addr_idx;
1286 	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1287 	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1288 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1289 	raw_spin_unlock(&drvdata->spinlock);
1290 	return size;
1291 }
1292 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1293 
/* Display names, indexed by the ETM_ADDR_TYPE_* value in addr_type[] */
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};
1301 
/*
 * Render a one-line summary of the currently selected address comparator:
 * its type, its value(s), the include/exclude disposition for ranges, and
 * the raw access-control (addr_acc) value.
 */
static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	raw_spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		/*
		 * A range occupies an even/odd comparator pair; normalise
		 * to the even index so (addr_v, addr_v2) is always
		 * (low, high).
		 */
		if (idx & 0x1) {
			idx -= 1;
			addr_v2 = addr_v;
			addr_v = config->addr_val[idx];
		} else {
			addr_v2 = config->addr_val[idx + 1];
		}
		/* exclude bits live in the upper half of viiectlr */
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	raw_spin_unlock(&drvdata->spinlock);
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		/* addr_type == 0 means the comparator is not in use */
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);
1344 
1345 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1346 					    struct device_attribute *attr,
1347 					    char *buf)
1348 {
1349 	unsigned long val;
1350 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1351 	struct etmv4_config *config = &drvdata->config;
1352 
1353 	if (!drvdata->nr_pe_cmp)
1354 		return -EINVAL;
1355 	val = config->vipcssctlr;
1356 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1357 }
1358 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1359 					     struct device_attribute *attr,
1360 					     const char *buf, size_t size)
1361 {
1362 	unsigned long val;
1363 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1364 	struct etmv4_config *config = &drvdata->config;
1365 
1366 	if (kstrtoul(buf, 16, &val))
1367 		return -EINVAL;
1368 	if (!drvdata->nr_pe_cmp)
1369 		return -EINVAL;
1370 
1371 	raw_spin_lock(&drvdata->spinlock);
1372 	config->vipcssctlr = val;
1373 	raw_spin_unlock(&drvdata->spinlock);
1374 	return size;
1375 }
1376 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1377 
1378 static ssize_t seq_idx_show(struct device *dev,
1379 			    struct device_attribute *attr,
1380 			    char *buf)
1381 {
1382 	unsigned long val;
1383 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1384 	struct etmv4_config *config = &drvdata->config;
1385 
1386 	val = config->seq_idx;
1387 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1388 }
1389 
1390 static ssize_t seq_idx_store(struct device *dev,
1391 			     struct device_attribute *attr,
1392 			     const char *buf, size_t size)
1393 {
1394 	unsigned long val;
1395 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1396 	struct etmv4_config *config = &drvdata->config;
1397 
1398 	if (kstrtoul(buf, 16, &val))
1399 		return -EINVAL;
1400 	if (val >= drvdata->nrseqstate - 1)
1401 		return -EINVAL;
1402 
1403 	/*
1404 	 * Use spinlock to ensure index doesn't change while it gets
1405 	 * dereferenced multiple times within a spinlock block elsewhere.
1406 	 */
1407 	raw_spin_lock(&drvdata->spinlock);
1408 	config->seq_idx = val;
1409 	raw_spin_unlock(&drvdata->spinlock);
1410 	return size;
1411 }
1412 static DEVICE_ATTR_RW(seq_idx);
1413 
1414 static ssize_t seq_state_show(struct device *dev,
1415 			      struct device_attribute *attr,
1416 			      char *buf)
1417 {
1418 	unsigned long val;
1419 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1420 	struct etmv4_config *config = &drvdata->config;
1421 
1422 	val = config->seq_state;
1423 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1424 }
1425 
1426 static ssize_t seq_state_store(struct device *dev,
1427 			       struct device_attribute *attr,
1428 			       const char *buf, size_t size)
1429 {
1430 	unsigned long val;
1431 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1432 	struct etmv4_config *config = &drvdata->config;
1433 
1434 	if (kstrtoul(buf, 16, &val))
1435 		return -EINVAL;
1436 	if (val >= drvdata->nrseqstate)
1437 		return -EINVAL;
1438 
1439 	config->seq_state = val;
1440 	return size;
1441 }
1442 static DEVICE_ATTR_RW(seq_state);
1443 
1444 static ssize_t seq_event_show(struct device *dev,
1445 			      struct device_attribute *attr,
1446 			      char *buf)
1447 {
1448 	u8 idx;
1449 	unsigned long val;
1450 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1451 	struct etmv4_config *config = &drvdata->config;
1452 
1453 	raw_spin_lock(&drvdata->spinlock);
1454 	idx = config->seq_idx;
1455 	val = config->seq_ctrl[idx];
1456 	raw_spin_unlock(&drvdata->spinlock);
1457 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1458 }
1459 
1460 static ssize_t seq_event_store(struct device *dev,
1461 			       struct device_attribute *attr,
1462 			       const char *buf, size_t size)
1463 {
1464 	u8 idx;
1465 	unsigned long val;
1466 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1467 	struct etmv4_config *config = &drvdata->config;
1468 
1469 	if (kstrtoul(buf, 16, &val))
1470 		return -EINVAL;
1471 
1472 	raw_spin_lock(&drvdata->spinlock);
1473 	idx = config->seq_idx;
1474 	/* Seq control has two masks B[15:8] F[7:0] */
1475 	config->seq_ctrl[idx] = val & 0xFFFF;
1476 	raw_spin_unlock(&drvdata->spinlock);
1477 	return size;
1478 }
1479 static DEVICE_ATTR_RW(seq_event);
1480 
1481 static ssize_t seq_reset_event_show(struct device *dev,
1482 				    struct device_attribute *attr,
1483 				    char *buf)
1484 {
1485 	unsigned long val;
1486 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1487 	struct etmv4_config *config = &drvdata->config;
1488 
1489 	val = config->seq_rst;
1490 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1491 }
1492 
1493 static ssize_t seq_reset_event_store(struct device *dev,
1494 				     struct device_attribute *attr,
1495 				     const char *buf, size_t size)
1496 {
1497 	unsigned long val;
1498 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499 	struct etmv4_config *config = &drvdata->config;
1500 
1501 	if (kstrtoul(buf, 16, &val))
1502 		return -EINVAL;
1503 	if (!(drvdata->nrseqstate))
1504 		return -EINVAL;
1505 
1506 	config->seq_rst = val & ETMv4_EVENT_MASK;
1507 	return size;
1508 }
1509 static DEVICE_ATTR_RW(seq_reset_event);
1510 
1511 static ssize_t cntr_idx_show(struct device *dev,
1512 			     struct device_attribute *attr,
1513 			     char *buf)
1514 {
1515 	unsigned long val;
1516 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1517 	struct etmv4_config *config = &drvdata->config;
1518 
1519 	val = config->cntr_idx;
1520 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1521 }
1522 
1523 static ssize_t cntr_idx_store(struct device *dev,
1524 			      struct device_attribute *attr,
1525 			      const char *buf, size_t size)
1526 {
1527 	unsigned long val;
1528 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1529 	struct etmv4_config *config = &drvdata->config;
1530 
1531 	if (kstrtoul(buf, 16, &val))
1532 		return -EINVAL;
1533 	if (val >= drvdata->nr_cntr)
1534 		return -EINVAL;
1535 
1536 	/*
1537 	 * Use spinlock to ensure index doesn't change while it gets
1538 	 * dereferenced multiple times within a spinlock block elsewhere.
1539 	 */
1540 	raw_spin_lock(&drvdata->spinlock);
1541 	config->cntr_idx = val;
1542 	raw_spin_unlock(&drvdata->spinlock);
1543 	return size;
1544 }
1545 static DEVICE_ATTR_RW(cntr_idx);
1546 
1547 static ssize_t cntrldvr_show(struct device *dev,
1548 			     struct device_attribute *attr,
1549 			     char *buf)
1550 {
1551 	u8 idx;
1552 	unsigned long val;
1553 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1554 	struct etmv4_config *config = &drvdata->config;
1555 
1556 	raw_spin_lock(&drvdata->spinlock);
1557 	idx = config->cntr_idx;
1558 	val = config->cntrldvr[idx];
1559 	raw_spin_unlock(&drvdata->spinlock);
1560 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1561 }
1562 
1563 static ssize_t cntrldvr_store(struct device *dev,
1564 			      struct device_attribute *attr,
1565 			      const char *buf, size_t size)
1566 {
1567 	u8 idx;
1568 	unsigned long val;
1569 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1570 	struct etmv4_config *config = &drvdata->config;
1571 
1572 	if (kstrtoul(buf, 16, &val))
1573 		return -EINVAL;
1574 	if (val > ETM_CNTR_MAX_VAL)
1575 		return -EINVAL;
1576 
1577 	raw_spin_lock(&drvdata->spinlock);
1578 	idx = config->cntr_idx;
1579 	config->cntrldvr[idx] = val;
1580 	raw_spin_unlock(&drvdata->spinlock);
1581 	return size;
1582 }
1583 static DEVICE_ATTR_RW(cntrldvr);
1584 
1585 static ssize_t cntr_val_show(struct device *dev,
1586 			     struct device_attribute *attr,
1587 			     char *buf)
1588 {
1589 	u8 idx;
1590 	unsigned long val;
1591 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1592 	struct etmv4_config *config = &drvdata->config;
1593 
1594 	raw_spin_lock(&drvdata->spinlock);
1595 	idx = config->cntr_idx;
1596 	val = config->cntr_val[idx];
1597 	raw_spin_unlock(&drvdata->spinlock);
1598 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1599 }
1600 
1601 static ssize_t cntr_val_store(struct device *dev,
1602 			      struct device_attribute *attr,
1603 			      const char *buf, size_t size)
1604 {
1605 	u8 idx;
1606 	unsigned long val;
1607 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1608 	struct etmv4_config *config = &drvdata->config;
1609 
1610 	if (kstrtoul(buf, 16, &val))
1611 		return -EINVAL;
1612 	if (val > ETM_CNTR_MAX_VAL)
1613 		return -EINVAL;
1614 
1615 	raw_spin_lock(&drvdata->spinlock);
1616 	idx = config->cntr_idx;
1617 	config->cntr_val[idx] = val;
1618 	raw_spin_unlock(&drvdata->spinlock);
1619 	return size;
1620 }
1621 static DEVICE_ATTR_RW(cntr_val);
1622 
1623 static ssize_t cntr_ctrl_show(struct device *dev,
1624 			      struct device_attribute *attr,
1625 			      char *buf)
1626 {
1627 	u8 idx;
1628 	unsigned long val;
1629 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1630 	struct etmv4_config *config = &drvdata->config;
1631 
1632 	raw_spin_lock(&drvdata->spinlock);
1633 	idx = config->cntr_idx;
1634 	val = config->cntr_ctrl[idx];
1635 	raw_spin_unlock(&drvdata->spinlock);
1636 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1637 }
1638 
1639 static ssize_t cntr_ctrl_store(struct device *dev,
1640 			       struct device_attribute *attr,
1641 			       const char *buf, size_t size)
1642 {
1643 	u8 idx;
1644 	unsigned long val;
1645 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1646 	struct etmv4_config *config = &drvdata->config;
1647 
1648 	if (kstrtoul(buf, 16, &val))
1649 		return -EINVAL;
1650 
1651 	raw_spin_lock(&drvdata->spinlock);
1652 	idx = config->cntr_idx;
1653 	config->cntr_ctrl[idx] = val;
1654 	raw_spin_unlock(&drvdata->spinlock);
1655 	return size;
1656 }
1657 static DEVICE_ATTR_RW(cntr_ctrl);
1658 
1659 static ssize_t res_idx_show(struct device *dev,
1660 			    struct device_attribute *attr,
1661 			    char *buf)
1662 {
1663 	unsigned long val;
1664 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1665 	struct etmv4_config *config = &drvdata->config;
1666 
1667 	val = config->res_idx;
1668 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1669 }
1670 
1671 static ssize_t res_idx_store(struct device *dev,
1672 			     struct device_attribute *attr,
1673 			     const char *buf, size_t size)
1674 {
1675 	unsigned long val;
1676 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1677 	struct etmv4_config *config = &drvdata->config;
1678 
1679 	if (kstrtoul(buf, 16, &val))
1680 		return -EINVAL;
1681 	/*
1682 	 * Resource selector pair 0 is always implemented and reserved,
1683 	 * namely an idx with 0 and 1 is illegal.
1684 	 */
1685 	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1686 		return -EINVAL;
1687 
1688 	/*
1689 	 * Use spinlock to ensure index doesn't change while it gets
1690 	 * dereferenced multiple times within a spinlock block elsewhere.
1691 	 */
1692 	raw_spin_lock(&drvdata->spinlock);
1693 	config->res_idx = val;
1694 	raw_spin_unlock(&drvdata->spinlock);
1695 	return size;
1696 }
1697 static DEVICE_ATTR_RW(res_idx);
1698 
1699 static ssize_t res_ctrl_show(struct device *dev,
1700 			     struct device_attribute *attr,
1701 			     char *buf)
1702 {
1703 	u8 idx;
1704 	unsigned long val;
1705 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1706 	struct etmv4_config *config = &drvdata->config;
1707 
1708 	raw_spin_lock(&drvdata->spinlock);
1709 	idx = config->res_idx;
1710 	val = config->res_ctrl[idx];
1711 	raw_spin_unlock(&drvdata->spinlock);
1712 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1713 }
1714 
1715 static ssize_t res_ctrl_store(struct device *dev,
1716 			      struct device_attribute *attr,
1717 			      const char *buf, size_t size)
1718 {
1719 	u8 idx;
1720 	unsigned long val;
1721 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1722 	struct etmv4_config *config = &drvdata->config;
1723 
1724 	if (kstrtoul(buf, 16, &val))
1725 		return -EINVAL;
1726 
1727 	raw_spin_lock(&drvdata->spinlock);
1728 	idx = config->res_idx;
1729 	/* For odd idx pair inversal bit is RES0 */
1730 	if (idx % 2 != 0)
1731 		/* PAIRINV, bit[21] */
1732 		val &= ~TRCRSCTLRn_PAIRINV;
1733 	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1734 				       TRCRSCTLRn_INV |
1735 				       TRCRSCTLRn_GROUP_MASK |
1736 				       TRCRSCTLRn_SELECT_MASK);
1737 	raw_spin_unlock(&drvdata->spinlock);
1738 	return size;
1739 }
1740 static DEVICE_ATTR_RW(res_ctrl);
1741 
1742 static ssize_t sshot_idx_show(struct device *dev,
1743 			      struct device_attribute *attr, char *buf)
1744 {
1745 	unsigned long val;
1746 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1747 	struct etmv4_config *config = &drvdata->config;
1748 
1749 	val = config->ss_idx;
1750 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1751 }
1752 
1753 static ssize_t sshot_idx_store(struct device *dev,
1754 			       struct device_attribute *attr,
1755 			       const char *buf, size_t size)
1756 {
1757 	unsigned long val;
1758 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1759 	struct etmv4_config *config = &drvdata->config;
1760 
1761 	if (kstrtoul(buf, 16, &val))
1762 		return -EINVAL;
1763 	if (val >= drvdata->nr_ss_cmp)
1764 		return -EINVAL;
1765 
1766 	raw_spin_lock(&drvdata->spinlock);
1767 	config->ss_idx = val;
1768 	raw_spin_unlock(&drvdata->spinlock);
1769 	return size;
1770 }
1771 static DEVICE_ATTR_RW(sshot_idx);
1772 
1773 static ssize_t sshot_ctrl_show(struct device *dev,
1774 			       struct device_attribute *attr,
1775 			       char *buf)
1776 {
1777 	unsigned long val;
1778 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1779 	struct etmv4_config *config = &drvdata->config;
1780 
1781 	raw_spin_lock(&drvdata->spinlock);
1782 	val = config->ss_ctrl[config->ss_idx];
1783 	raw_spin_unlock(&drvdata->spinlock);
1784 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1785 }
1786 
1787 static ssize_t sshot_ctrl_store(struct device *dev,
1788 				struct device_attribute *attr,
1789 				const char *buf, size_t size)
1790 {
1791 	u8 idx;
1792 	unsigned long val;
1793 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1794 	struct etmv4_config *config = &drvdata->config;
1795 
1796 	if (kstrtoul(buf, 16, &val))
1797 		return -EINVAL;
1798 
1799 	raw_spin_lock(&drvdata->spinlock);
1800 	idx = config->ss_idx;
1801 	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1802 	/* must clear bit 31 in related status register on programming */
1803 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1804 	raw_spin_unlock(&drvdata->spinlock);
1805 	return size;
1806 }
1807 static DEVICE_ATTR_RW(sshot_ctrl);
1808 
1809 static ssize_t sshot_status_show(struct device *dev,
1810 				 struct device_attribute *attr, char *buf)
1811 {
1812 	unsigned long val;
1813 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1814 	struct etmv4_config *config = &drvdata->config;
1815 
1816 	raw_spin_lock(&drvdata->spinlock);
1817 	val = config->ss_status[config->ss_idx];
1818 	raw_spin_unlock(&drvdata->spinlock);
1819 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1820 }
1821 static DEVICE_ATTR_RO(sshot_status);
1822 
1823 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1824 				  struct device_attribute *attr,
1825 				  char *buf)
1826 {
1827 	unsigned long val;
1828 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1829 	struct etmv4_config *config = &drvdata->config;
1830 
1831 	raw_spin_lock(&drvdata->spinlock);
1832 	val = config->ss_pe_cmp[config->ss_idx];
1833 	raw_spin_unlock(&drvdata->spinlock);
1834 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1835 }
1836 
1837 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1838 				   struct device_attribute *attr,
1839 				   const char *buf, size_t size)
1840 {
1841 	u8 idx;
1842 	unsigned long val;
1843 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1844 	struct etmv4_config *config = &drvdata->config;
1845 
1846 	if (kstrtoul(buf, 16, &val))
1847 		return -EINVAL;
1848 
1849 	raw_spin_lock(&drvdata->spinlock);
1850 	idx = config->ss_idx;
1851 	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1852 	/* must clear bit 31 in related status register on programming */
1853 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1854 	raw_spin_unlock(&drvdata->spinlock);
1855 	return size;
1856 }
1857 static DEVICE_ATTR_RW(sshot_pe_ctrl);
1858 
1859 static ssize_t ctxid_idx_show(struct device *dev,
1860 			      struct device_attribute *attr,
1861 			      char *buf)
1862 {
1863 	unsigned long val;
1864 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1865 	struct etmv4_config *config = &drvdata->config;
1866 
1867 	val = config->ctxid_idx;
1868 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1869 }
1870 
1871 static ssize_t ctxid_idx_store(struct device *dev,
1872 			       struct device_attribute *attr,
1873 			       const char *buf, size_t size)
1874 {
1875 	unsigned long val;
1876 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1877 	struct etmv4_config *config = &drvdata->config;
1878 
1879 	if (kstrtoul(buf, 16, &val))
1880 		return -EINVAL;
1881 	if (val >= drvdata->numcidc)
1882 		return -EINVAL;
1883 
1884 	/*
1885 	 * Use spinlock to ensure index doesn't change while it gets
1886 	 * dereferenced multiple times within a spinlock block elsewhere.
1887 	 */
1888 	raw_spin_lock(&drvdata->spinlock);
1889 	config->ctxid_idx = val;
1890 	raw_spin_unlock(&drvdata->spinlock);
1891 	return size;
1892 }
1893 static DEVICE_ATTR_RW(ctxid_idx);
1894 
1895 static ssize_t ctxid_pid_show(struct device *dev,
1896 			      struct device_attribute *attr,
1897 			      char *buf)
1898 {
1899 	u8 idx;
1900 	unsigned long val;
1901 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1902 	struct etmv4_config *config = &drvdata->config;
1903 
1904 	/*
1905 	 * Don't use contextID tracing if coming from a PID namespace.  See
1906 	 * comment in ctxid_pid_store().
1907 	 */
1908 	if (task_active_pid_ns(current) != &init_pid_ns)
1909 		return -EINVAL;
1910 
1911 	raw_spin_lock(&drvdata->spinlock);
1912 	idx = config->ctxid_idx;
1913 	val = (unsigned long)config->ctxid_pid[idx];
1914 	raw_spin_unlock(&drvdata->spinlock);
1915 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1916 }
1917 
1918 static ssize_t ctxid_pid_store(struct device *dev,
1919 			       struct device_attribute *attr,
1920 			       const char *buf, size_t size)
1921 {
1922 	u8 idx;
1923 	unsigned long pid;
1924 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1925 	struct etmv4_config *config = &drvdata->config;
1926 
1927 	/*
1928 	 * When contextID tracing is enabled the tracers will insert the
1929 	 * value found in the contextID register in the trace stream.  But if
1930 	 * a process is in a namespace the PID of that process as seen from the
1931 	 * namespace won't be what the kernel sees, something that makes the
1932 	 * feature confusing and can potentially leak kernel only information.
1933 	 * As such refuse to use the feature if @current is not in the initial
1934 	 * PID namespace.
1935 	 */
1936 	if (task_active_pid_ns(current) != &init_pid_ns)
1937 		return -EINVAL;
1938 
1939 	/*
1940 	 * only implemented when ctxid tracing is enabled, i.e. at least one
1941 	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1942 	 * in length
1943 	 */
1944 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1945 		return -EINVAL;
1946 	if (kstrtoul(buf, 16, &pid))
1947 		return -EINVAL;
1948 
1949 	raw_spin_lock(&drvdata->spinlock);
1950 	idx = config->ctxid_idx;
1951 	config->ctxid_pid[idx] = (u64)pid;
1952 	raw_spin_unlock(&drvdata->spinlock);
1953 	return size;
1954 }
1955 static DEVICE_ATTR_RW(ctxid_pid);
1956 
1957 static ssize_t ctxid_masks_show(struct device *dev,
1958 				struct device_attribute *attr,
1959 				char *buf)
1960 {
1961 	unsigned long val1, val2;
1962 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1963 	struct etmv4_config *config = &drvdata->config;
1964 
1965 	/*
1966 	 * Don't use contextID tracing if coming from a PID namespace.  See
1967 	 * comment in ctxid_pid_store().
1968 	 */
1969 	if (task_active_pid_ns(current) != &init_pid_ns)
1970 		return -EINVAL;
1971 
1972 	raw_spin_lock(&drvdata->spinlock);
1973 	val1 = config->ctxid_mask0;
1974 	val2 = config->ctxid_mask1;
1975 	raw_spin_unlock(&drvdata->spinlock);
1976 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1977 }
1978 
1979 static ssize_t ctxid_masks_store(struct device *dev,
1980 				struct device_attribute *attr,
1981 				const char *buf, size_t size)
1982 {
1983 	u8 i, j, maskbyte;
1984 	unsigned long val1, val2, mask;
1985 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1986 	struct etmv4_config *config = &drvdata->config;
1987 	int nr_inputs;
1988 
1989 	/*
1990 	 * Don't use contextID tracing if coming from a PID namespace.  See
1991 	 * comment in ctxid_pid_store().
1992 	 */
1993 	if (task_active_pid_ns(current) != &init_pid_ns)
1994 		return -EINVAL;
1995 
1996 	/*
1997 	 * only implemented when ctxid tracing is enabled, i.e. at least one
1998 	 * ctxid comparator is implemented and ctxid is greater than 0 bits
1999 	 * in length
2000 	 */
2001 	if (!drvdata->ctxid_size || !drvdata->numcidc)
2002 		return -EINVAL;
2003 	/* one mask if <= 4 comparators, two for up to 8 */
2004 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2005 	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2006 		return -EINVAL;
2007 
2008 	raw_spin_lock(&drvdata->spinlock);
2009 	/*
2010 	 * each byte[0..3] controls mask value applied to ctxid
2011 	 * comparator[0..3]
2012 	 */
2013 	switch (drvdata->numcidc) {
2014 	case 0x1:
2015 		/* COMP0, bits[7:0] */
2016 		config->ctxid_mask0 = val1 & 0xFF;
2017 		break;
2018 	case 0x2:
2019 		/* COMP1, bits[15:8] */
2020 		config->ctxid_mask0 = val1 & 0xFFFF;
2021 		break;
2022 	case 0x3:
2023 		/* COMP2, bits[23:16] */
2024 		config->ctxid_mask0 = val1 & 0xFFFFFF;
2025 		break;
2026 	case 0x4:
2027 		 /* COMP3, bits[31:24] */
2028 		config->ctxid_mask0 = val1;
2029 		break;
2030 	case 0x5:
2031 		/* COMP4, bits[7:0] */
2032 		config->ctxid_mask0 = val1;
2033 		config->ctxid_mask1 = val2 & 0xFF;
2034 		break;
2035 	case 0x6:
2036 		/* COMP5, bits[15:8] */
2037 		config->ctxid_mask0 = val1;
2038 		config->ctxid_mask1 = val2 & 0xFFFF;
2039 		break;
2040 	case 0x7:
2041 		/* COMP6, bits[23:16] */
2042 		config->ctxid_mask0 = val1;
2043 		config->ctxid_mask1 = val2 & 0xFFFFFF;
2044 		break;
2045 	case 0x8:
2046 		/* COMP7, bits[31:24] */
2047 		config->ctxid_mask0 = val1;
2048 		config->ctxid_mask1 = val2;
2049 		break;
2050 	default:
2051 		break;
2052 	}
2053 	/*
2054 	 * If software sets a mask bit to 1, it must program relevant byte
2055 	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2056 	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2057 	 * of ctxid comparator0 value (corresponding to byte 0) register.
2058 	 */
2059 	mask = config->ctxid_mask0;
2060 	for (i = 0; i < drvdata->numcidc; i++) {
2061 		/* mask value of corresponding ctxid comparator */
2062 		maskbyte = mask & ETMv4_EVENT_MASK;
2063 		/*
2064 		 * each bit corresponds to a byte of respective ctxid comparator
2065 		 * value register
2066 		 */
2067 		for (j = 0; j < 8; j++) {
2068 			if (maskbyte & 1)
2069 				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2070 			maskbyte >>= 1;
2071 		}
2072 		/* Select the next ctxid comparator mask value */
2073 		if (i == 3)
2074 			/* ctxid comparators[4-7] */
2075 			mask = config->ctxid_mask1;
2076 		else
2077 			mask >>= 0x8;
2078 	}
2079 
2080 	raw_spin_unlock(&drvdata->spinlock);
2081 	return size;
2082 }
2083 static DEVICE_ATTR_RW(ctxid_masks);
2084 
2085 static ssize_t vmid_idx_show(struct device *dev,
2086 			     struct device_attribute *attr,
2087 			     char *buf)
2088 {
2089 	unsigned long val;
2090 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2091 	struct etmv4_config *config = &drvdata->config;
2092 
2093 	val = config->vmid_idx;
2094 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2095 }
2096 
2097 static ssize_t vmid_idx_store(struct device *dev,
2098 			      struct device_attribute *attr,
2099 			      const char *buf, size_t size)
2100 {
2101 	unsigned long val;
2102 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2103 	struct etmv4_config *config = &drvdata->config;
2104 
2105 	if (kstrtoul(buf, 16, &val))
2106 		return -EINVAL;
2107 	if (val >= drvdata->numvmidc)
2108 		return -EINVAL;
2109 
2110 	/*
2111 	 * Use spinlock to ensure index doesn't change while it gets
2112 	 * dereferenced multiple times within a spinlock block elsewhere.
2113 	 */
2114 	raw_spin_lock(&drvdata->spinlock);
2115 	config->vmid_idx = val;
2116 	raw_spin_unlock(&drvdata->spinlock);
2117 	return size;
2118 }
2119 static DEVICE_ATTR_RW(vmid_idx);
2120 
2121 static ssize_t vmid_val_show(struct device *dev,
2122 			     struct device_attribute *attr,
2123 			     char *buf)
2124 {
2125 	unsigned long val;
2126 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2127 	struct etmv4_config *config = &drvdata->config;
2128 
2129 	/*
2130 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2131 	 * See comment in ctxid_pid_store().
2132 	 */
2133 	if (!task_is_in_init_pid_ns(current))
2134 		return -EINVAL;
2135 
2136 	raw_spin_lock(&drvdata->spinlock);
2137 	val = (unsigned long)config->vmid_val[config->vmid_idx];
2138 	raw_spin_unlock(&drvdata->spinlock);
2139 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2140 }
2141 
2142 static ssize_t vmid_val_store(struct device *dev,
2143 			      struct device_attribute *attr,
2144 			      const char *buf, size_t size)
2145 {
2146 	unsigned long val;
2147 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2148 	struct etmv4_config *config = &drvdata->config;
2149 
2150 	/*
2151 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2152 	 * See comment in ctxid_pid_store().
2153 	 */
2154 	if (!task_is_in_init_pid_ns(current))
2155 		return -EINVAL;
2156 
2157 	/*
2158 	 * only implemented when vmid tracing is enabled, i.e. at least one
2159 	 * vmid comparator is implemented and at least 8 bit vmid size
2160 	 */
2161 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2162 		return -EINVAL;
2163 	if (kstrtoul(buf, 16, &val))
2164 		return -EINVAL;
2165 
2166 	raw_spin_lock(&drvdata->spinlock);
2167 	config->vmid_val[config->vmid_idx] = (u64)val;
2168 	raw_spin_unlock(&drvdata->spinlock);
2169 	return size;
2170 }
2171 static DEVICE_ATTR_RW(vmid_val);
2172 
2173 static ssize_t vmid_masks_show(struct device *dev,
2174 			       struct device_attribute *attr, char *buf)
2175 {
2176 	unsigned long val1, val2;
2177 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2178 	struct etmv4_config *config = &drvdata->config;
2179 
2180 	/*
2181 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2182 	 * See comment in ctxid_pid_store().
2183 	 */
2184 	if (!task_is_in_init_pid_ns(current))
2185 		return -EINVAL;
2186 
2187 	raw_spin_lock(&drvdata->spinlock);
2188 	val1 = config->vmid_mask0;
2189 	val2 = config->vmid_mask1;
2190 	raw_spin_unlock(&drvdata->spinlock);
2191 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2192 }
2193 
2194 static ssize_t vmid_masks_store(struct device *dev,
2195 				struct device_attribute *attr,
2196 				const char *buf, size_t size)
2197 {
2198 	u8 i, j, maskbyte;
2199 	unsigned long val1, val2, mask;
2200 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2201 	struct etmv4_config *config = &drvdata->config;
2202 	int nr_inputs;
2203 
2204 	/*
2205 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2206 	 * See comment in ctxid_pid_store().
2207 	 */
2208 	if (!task_is_in_init_pid_ns(current))
2209 		return -EINVAL;
2210 
2211 	/*
2212 	 * only implemented when vmid tracing is enabled, i.e. at least one
2213 	 * vmid comparator is implemented and at least 8 bit vmid size
2214 	 */
2215 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2216 		return -EINVAL;
2217 	/* one mask if <= 4 comparators, two for up to 8 */
2218 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2219 	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2220 		return -EINVAL;
2221 
2222 	raw_spin_lock(&drvdata->spinlock);
2223 
2224 	/*
2225 	 * each byte[0..3] controls mask value applied to vmid
2226 	 * comparator[0..3]
2227 	 */
2228 	switch (drvdata->numvmidc) {
2229 	case 0x1:
2230 		/* COMP0, bits[7:0] */
2231 		config->vmid_mask0 = val1 & 0xFF;
2232 		break;
2233 	case 0x2:
2234 		/* COMP1, bits[15:8] */
2235 		config->vmid_mask0 = val1 & 0xFFFF;
2236 		break;
2237 	case 0x3:
2238 		/* COMP2, bits[23:16] */
2239 		config->vmid_mask0 = val1 & 0xFFFFFF;
2240 		break;
2241 	case 0x4:
2242 		/* COMP3, bits[31:24] */
2243 		config->vmid_mask0 = val1;
2244 		break;
2245 	case 0x5:
2246 		/* COMP4, bits[7:0] */
2247 		config->vmid_mask0 = val1;
2248 		config->vmid_mask1 = val2 & 0xFF;
2249 		break;
2250 	case 0x6:
2251 		/* COMP5, bits[15:8] */
2252 		config->vmid_mask0 = val1;
2253 		config->vmid_mask1 = val2 & 0xFFFF;
2254 		break;
2255 	case 0x7:
2256 		/* COMP6, bits[23:16] */
2257 		config->vmid_mask0 = val1;
2258 		config->vmid_mask1 = val2 & 0xFFFFFF;
2259 		break;
2260 	case 0x8:
2261 		/* COMP7, bits[31:24] */
2262 		config->vmid_mask0 = val1;
2263 		config->vmid_mask1 = val2;
2264 		break;
2265 	default:
2266 		break;
2267 	}
2268 
2269 	/*
2270 	 * If software sets a mask bit to 1, it must program relevant byte
2271 	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2272 	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2273 	 * of vmid comparator0 value (corresponding to byte 0) register.
2274 	 */
2275 	mask = config->vmid_mask0;
2276 	for (i = 0; i < drvdata->numvmidc; i++) {
2277 		/* mask value of corresponding vmid comparator */
2278 		maskbyte = mask & ETMv4_EVENT_MASK;
2279 		/*
2280 		 * each bit corresponds to a byte of respective vmid comparator
2281 		 * value register
2282 		 */
2283 		for (j = 0; j < 8; j++) {
2284 			if (maskbyte & 1)
2285 				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2286 			maskbyte >>= 1;
2287 		}
2288 		/* Select the next vmid comparator mask value */
2289 		if (i == 3)
2290 			/* vmid comparators[4-7] */
2291 			mask = config->vmid_mask1;
2292 		else
2293 			mask >>= 0x8;
2294 	}
2295 	raw_spin_unlock(&drvdata->spinlock);
2296 	return size;
2297 }
2298 static DEVICE_ATTR_RW(vmid_masks);
2299 
2300 static ssize_t cpu_show(struct device *dev,
2301 			struct device_attribute *attr, char *buf)
2302 {
2303 	int val;
2304 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2305 
2306 	val = drvdata->cpu;
2307 	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2308 
2309 }
2310 static DEVICE_ATTR_RO(cpu);
2311 
2312 static ssize_t ts_source_show(struct device *dev,
2313 			      struct device_attribute *attr,
2314 			      char *buf)
2315 {
2316 	int val;
2317 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2318 
2319 	if (!drvdata->trfcr) {
2320 		val = -1;
2321 		goto out;
2322 	}
2323 
2324 	val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
2325 	switch (val) {
2326 	case TRFCR_EL1_TS_VIRTUAL:
2327 	case TRFCR_EL1_TS_GUEST_PHYSICAL:
2328 	case TRFCR_EL1_TS_PHYSICAL:
2329 		break;
2330 	default:
2331 		val = -1;
2332 		break;
2333 	}
2334 
2335 out:
2336 	return sysfs_emit(buf, "%d\n", val);
2337 }
2338 static DEVICE_ATTR_RO(ts_source);
2339 
/* Configuration and status attributes exported in the device's sysfs dir */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	&dev_attr_ts_source.attr,
	NULL,
};
2397 
2398 /*
2399  * Trace ID allocated dynamically on enable - but also allocate on read
2400  * in case sysfs or perf read before enable to ensure consistent metadata
2401  * information for trace decode
2402  */
2403 static ssize_t trctraceid_show(struct device *dev,
2404 			       struct device_attribute *attr,
2405 			       char *buf)
2406 {
2407 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2408 	int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL);
2409 
2410 	if (trace_id < 0)
2411 		return trace_id;
2412 
2413 	return sysfs_emit(buf, "0x%x\n", trace_id);
2414 }
2415 
/* Argument bundle for a cross-CPU register read via do_smp_cross_read() */
struct etmv4_reg {
	struct coresight_device *csdev;	/* device whose register is read */
	u32 offset;			/* register offset to read */
	u32 data;			/* value read back on the target CPU */
};
2421 
/* Executed on the target CPU: read one ETM register into reg->data */
static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}
2428 
2429 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2430 {
2431 	struct etmv4_reg reg;
2432 
2433 	reg.offset = offset;
2434 	reg.csdev = drvdata->csdev;
2435 
2436 	/*
2437 	 * smp cross call ensures the CPU will be powered up before
2438 	 * accessing the ETMv4 trace core registers
2439 	 */
2440 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2441 	return reg.data;
2442 }
2443 
2444 static u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2445 {
2446 	struct dev_ext_attribute *eattr;
2447 
2448 	eattr = container_of(attr, struct dev_ext_attribute, attr);
2449 	return (u32)(unsigned long)eattr->var;
2450 }
2451 
2452 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2453 					struct device_attribute *d_attr,
2454 					char *buf)
2455 {
2456 	u32 val, offset;
2457 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2458 
2459 	offset = coresight_etm4x_attr_to_offset(d_attr);
2460 
2461 	pm_runtime_get_sync(dev->parent);
2462 	val = etmv4_cross_read(drvdata, offset);
2463 	pm_runtime_put_sync(dev->parent);
2464 
2465 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2466 }
2467 
/*
 * Decide whether the register at @offset is implemented for this device,
 * based on the access mode (system instructions vs memory-mapped) and on
 * whether the device is an ETE or an ETMv4x.
 */
static bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Common registers to ETE & ETM4x accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers accessible only via memory-mapped registers
		 * must not be accessed via system instructions.
		 * We cannot access the drvdata->csdev here, as this
		 * function is called during the device creation, via
		 * coresight_register() and the csdev is not initialized
		 * until that is done. So rely on the drvdata->base to
		 * detect if we have a memory mapped access.
		 * Also ETE doesn't implement memory mapped access, thus
		 * it is sufficient to check that we are using mmio.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	/* Anything not listed above is not implemented on this device */
	return false;
}
2506 
2507 /*
2508  * Hide the ETM4x registers that may not be available on the
2509  * hardware.
2510  * There are certain management registers unavailable via system
2511  * instructions. Make those sysfs attributes hidden on such
2512  * systems.
2513  */
2514 static umode_t
2515 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2516 				     struct attribute *attr, int unused)
2517 {
2518 	struct device *dev = kobj_to_dev(kobj);
2519 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2520 	struct device_attribute *d_attr;
2521 	u32 offset;
2522 
2523 	d_attr = container_of(attr, struct device_attribute, attr);
2524 	offset = coresight_etm4x_attr_to_offset(d_attr);
2525 
2526 	if (etm4x_register_implemented(drvdata, offset))
2527 		return attr->mode;
2528 	return 0;
2529 }
2530 
2531 /*
2532  * Macro to set an RO ext attribute with offset and show function.
2533  * Offset is used in mgmt group to ensure only correct registers for
2534  * the ETM / ETE variant are visible.
2535  */
2536 #define coresight_etm4x_reg_showfn(name, offset, showfn) (	\
2537 	&((struct dev_ext_attribute[]) {			\
2538 	   {							\
2539 		__ATTR(name, 0444, showfn, NULL),		\
2540 		(void *)(unsigned long)offset			\
2541 	   }							\
2542 	})[0].attr.attr						\
2543 	)
2544 
2545 /* macro using the default coresight_etm4x_reg_show function */
2546 #define coresight_etm4x_reg(name, offset)	\
2547 	coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
2548 
/* Management registers; visibility filtered per-register at group setup */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	/* trctraceid uses a custom show to allocate the ID on first read */
	coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};
2566 
/* Read-only TRCIDRn ID register dump (trcidr6/7 are reserved, omitted) */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};
2583 
/* Top-level attributes, placed directly in the device directory */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" subdirectory; unimplemented registers hidden via .is_visible */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" subdirectory with the ID register dump */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated list handed to the coresight core at registration */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
2605