xref: /linux/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c (revision c26f4fbd58375bd6ef74f95eb73d61762ad97c59)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/coresight.h>
8 #include <linux/pid_namespace.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/sysfs.h>
11 #include "coresight-etm4x.h"
12 #include "coresight-priv.h"
13 #include "coresight-syscfg.h"
14 
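/*
 * Helper for the addr_range attribute below: when the comparator currently
 * selected by addr_idx is an instruction address comparator forming the low
 * half of a RANGE pair, set either the exclude or the include bit for that
 * pair in config->viiectlr. Returns -EINVAL if the selected index is odd or
 * the pair is not of type ETM_ADDR_TYPE_RANGE.
 */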
15 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
16 {
17 	u8 idx;
18 	struct etmv4_config *config = &drvdata->config;
19 
20 	idx = config->addr_idx;
21 
22 	/*
23 	 * TRCACATRn.TYPE bit[1:0]: type of comparison
24 	 * the trace unit performs
25 	 */
26 	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
27 		if (idx % 2 != 0)
28 			return -EINVAL;
29 
30 		/*
31 		 * We are performing instruction address comparison. Set the
32 		 * relevant bit of ViewInst Include/Exclude Control register
33 		 * for corresponding address comparator pair.
34 		 */
35 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
36 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
37 			return -EINVAL;
38 
39 		if (exclude == true) {
40 			/*
41 			 * Set exclude bit and unset the include bit
42 			 * corresponding to comparator pair
43 			 */
44 			config->viiectlr |= BIT(idx / 2 + 16);
45 			config->viiectlr &= ~BIT(idx / 2);
46 		} else {
47 			/*
48 			 * Set include bit and unset exclude bit
49 			 * corresponding to comparator pair
50 			 */
51 			config->viiectlr |= BIT(idx / 2);
52 			config->viiectlr &= ~BIT(idx / 2 + 16);
53 		}
54 	}
55 	return 0;
56 }
57 
58 static ssize_t nr_pe_cmp_show(struct device *dev,
59 			      struct device_attribute *attr,
60 			      char *buf)
61 {
62 	unsigned long val;
63 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
64 
65 	val = drvdata->nr_pe_cmp;
66 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
67 }
68 static DEVICE_ATTR_RO(nr_pe_cmp);
69 
70 static ssize_t nr_addr_cmp_show(struct device *dev,
71 				struct device_attribute *attr,
72 				char *buf)
73 {
74 	unsigned long val;
75 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
76 
77 	val = drvdata->nr_addr_cmp;
78 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
79 }
80 static DEVICE_ATTR_RO(nr_addr_cmp);
81 
82 static ssize_t nr_cntr_show(struct device *dev,
83 			    struct device_attribute *attr,
84 			    char *buf)
85 {
86 	unsigned long val;
87 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
88 
89 	val = drvdata->nr_cntr;
90 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
91 }
92 static DEVICE_ATTR_RO(nr_cntr);
93 
94 static ssize_t nr_ext_inp_show(struct device *dev,
95 			       struct device_attribute *attr,
96 			       char *buf)
97 {
98 	unsigned long val;
99 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
100 
101 	val = drvdata->nr_ext_inp;
102 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
103 }
104 static DEVICE_ATTR_RO(nr_ext_inp);
105 
106 static ssize_t numcidc_show(struct device *dev,
107 			    struct device_attribute *attr,
108 			    char *buf)
109 {
110 	unsigned long val;
111 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
112 
113 	val = drvdata->numcidc;
114 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
115 }
116 static DEVICE_ATTR_RO(numcidc);
117 
118 static ssize_t numvmidc_show(struct device *dev,
119 			     struct device_attribute *attr,
120 			     char *buf)
121 {
122 	unsigned long val;
123 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
124 
125 	val = drvdata->numvmidc;
126 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
127 }
128 static DEVICE_ATTR_RO(numvmidc);
129 
130 static ssize_t nrseqstate_show(struct device *dev,
131 			       struct device_attribute *attr,
132 			       char *buf)
133 {
134 	unsigned long val;
135 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
136 
137 	val = drvdata->nrseqstate;
138 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
139 }
140 static DEVICE_ATTR_RO(nrseqstate);
141 
142 static ssize_t nr_resource_show(struct device *dev,
143 				struct device_attribute *attr,
144 				char *buf)
145 {
146 	unsigned long val;
147 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
148 
149 	val = drvdata->nr_resource;
150 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
151 }
152 static DEVICE_ATTR_RO(nr_resource);
153 
154 static ssize_t nr_ss_cmp_show(struct device *dev,
155 			      struct device_attribute *attr,
156 			      char *buf)
157 {
158 	unsigned long val;
159 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
160 
161 	val = drvdata->nr_ss_cmp;
162 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
163 }
164 static DEVICE_ATTR_RO(nr_ss_cmp);
165 
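/*
 * Writing to the 'reset' attribute returns the sysfs configuration to its
 * defaults: data tracing, events, timestamps, stalling, sequencer, counter,
 * resource and address/context/VMID comparator state are all cleared,
 * ViewInst is set to trace everything, and the trace ID is released. A
 * non-zero value additionally clears 'mode'. Illustrative usage (device
 * name depends on the platform):
 *
 *   echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 */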
166 static ssize_t reset_store(struct device *dev,
167 			   struct device_attribute *attr,
168 			   const char *buf, size_t size)
169 {
170 	int i;
171 	unsigned long val;
172 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
173 	struct etmv4_config *config = &drvdata->config;
174 
175 	if (kstrtoul(buf, 16, &val))
176 		return -EINVAL;
177 
178 	raw_spin_lock(&drvdata->spinlock);
179 	if (val)
180 		config->mode = 0x0;
181 
182 	/* Disable data tracing: do not trace load and store data transfers */
183 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
184 	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
185 
186 	/* Disable data value and data address tracing */
187 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
188 			   ETM_MODE_DATA_TRACE_VAL);
189 	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
190 
191 	/* Disable all events tracing */
192 	config->eventctrl0 = 0x0;
193 	config->eventctrl1 = 0x0;
194 
195 	/* Disable timestamp event */
196 	config->ts_ctrl = 0x0;
197 
198 	/* Disable stalling */
199 	config->stall_ctrl = 0x0;
200 
201 	/* Reset trace synchronization period to 2^8 = 256 bytes */
202 	if (drvdata->syncpr == false)
203 		config->syncfreq = 0x8;
204 
205 	/*
206 	 * Enable ViewInst to trace everything with start-stop logic in
207 	 * started state. ARM recommends start-stop logic is set before
208 	 * each trace run.
209 	 */
210 	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
211 	if (drvdata->nr_addr_cmp > 0) {
212 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
213 		/* SSSTATUS, bit[9] */
214 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
215 	}
216 
217 	/* No address range filtering for ViewInst */
218 	config->viiectlr = 0x0;
219 
220 	/* No start-stop filtering for ViewInst */
221 	config->vissctlr = 0x0;
222 	config->vipcssctlr = 0x0;
223 
224 	/* Disable seq events */
225 	for (i = 0; i < drvdata->nrseqstate-1; i++)
226 		config->seq_ctrl[i] = 0x0;
227 	config->seq_rst = 0x0;
228 	config->seq_state = 0x0;
229 
230 	/* Disable external input events */
231 	config->ext_inp = 0x0;
232 
233 	config->cntr_idx = 0x0;
234 	for (i = 0; i < drvdata->nr_cntr; i++) {
235 		config->cntrldvr[i] = 0x0;
236 		config->cntr_ctrl[i] = 0x0;
237 		config->cntr_val[i] = 0x0;
238 	}
239 
240 	config->res_idx = 0x0;
241 	for (i = 2; i < 2 * drvdata->nr_resource; i++)
242 		config->res_ctrl[i] = 0x0;
243 
244 	config->ss_idx = 0x0;
245 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
246 		config->ss_ctrl[i] = 0x0;
247 		config->ss_pe_cmp[i] = 0x0;
248 	}
249 
250 	config->addr_idx = 0x0;
251 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
252 		config->addr_val[i] = 0x0;
253 		config->addr_acc[i] = 0x0;
254 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
255 	}
256 
257 	config->ctxid_idx = 0x0;
258 	for (i = 0; i < drvdata->numcidc; i++)
259 		config->ctxid_pid[i] = 0x0;
260 
261 	config->ctxid_mask0 = 0x0;
262 	config->ctxid_mask1 = 0x0;
263 
264 	config->vmid_idx = 0x0;
265 	for (i = 0; i < drvdata->numvmidc; i++)
266 		config->vmid_val[i] = 0x0;
267 	config->vmid_mask0 = 0x0;
268 	config->vmid_mask1 = 0x0;
269 
270 	raw_spin_unlock(&drvdata->spinlock);
271 
272 	/* for sysfs - only release trace id when resetting */
273 	etm4_release_trace_id(drvdata);
274 
275 	cscfg_csdev_reset_feats(to_coresight_device(dev));
276 
277 	return size;
278 }
279 static DEVICE_ATTR_WO(reset);
280 
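/*
 * 'mode' is a bitmask (masked with ETMv4_MODE_ALL on write) selecting the
 * high level trace features: load/store P0 tracing, branch broadcast, cycle
 * counting, context ID and VMID tracing, conditional tracing, timestamps,
 * return stack, Q elements, ATB trigger, low-power override, stalling
 * behaviour and kernel/user exclusion. Each feature is only programmed into
 * the configuration registers if the corresponding capability flag in
 * drvdata says the hardware supports it.
 */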
281 static ssize_t mode_show(struct device *dev,
282 			 struct device_attribute *attr,
283 			 char *buf)
284 {
285 	unsigned long val;
286 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
287 	struct etmv4_config *config = &drvdata->config;
288 
289 	val = config->mode;
290 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
291 }
292 
293 static ssize_t mode_store(struct device *dev,
294 			  struct device_attribute *attr,
295 			  const char *buf, size_t size)
296 {
297 	unsigned long val, mode;
298 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
299 	struct etmv4_config *config = &drvdata->config;
300 
301 	if (kstrtoul(buf, 16, &val))
302 		return -EINVAL;
303 
304 	raw_spin_lock(&drvdata->spinlock);
305 	config->mode = val & ETMv4_MODE_ALL;
306 
307 	if (drvdata->instrp0 == true) {
308 		/* start by clearing instruction P0 field */
309 		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
310 		if (config->mode & ETM_MODE_LOAD)
311 			/* 0b01 Trace load instructions as P0 instructions */
312 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
313 		if (config->mode & ETM_MODE_STORE)
314 			/* 0b10 Trace store instructions as P0 instructions */
315 			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
316 		if (config->mode & ETM_MODE_LOAD_STORE)
317 			/*
318 			 * 0b11 Trace load and store instructions
319 			 * as P0 instructions
320 			 */
321 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
322 	}
323 
324 	/* bit[3], Branch broadcast mode */
325 	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
326 		config->cfg |= TRCCONFIGR_BB;
327 	else
328 		config->cfg &= ~TRCCONFIGR_BB;
329 
330 	/* bit[4], Cycle counting instruction trace bit */
331 	if ((config->mode & ETMv4_MODE_CYCACC) &&
332 		(drvdata->trccci == true))
333 		config->cfg |= TRCCONFIGR_CCI;
334 	else
335 		config->cfg &= ~TRCCONFIGR_CCI;
336 
337 	/* bit[6], Context ID tracing bit */
338 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
339 		config->cfg |= TRCCONFIGR_CID;
340 	else
341 		config->cfg &= ~TRCCONFIGR_CID;
342 
343 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
344 		config->cfg |= TRCCONFIGR_VMID;
345 	else
346 		config->cfg &= ~TRCCONFIGR_VMID;
347 
348 	/* bits[10:8], Conditional instruction tracing bit */
349 	mode = ETM_MODE_COND(config->mode);
350 	if (drvdata->trccond == true) {
351 		config->cfg &= ~TRCCONFIGR_COND_MASK;
352 		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
353 	}
354 
355 	/* bit[11], Global timestamp tracing bit */
356 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
357 		config->cfg |= TRCCONFIGR_TS;
358 	else
359 		config->cfg &= ~TRCCONFIGR_TS;
360 
361 	/* bit[12], Return stack enable bit */
362 	if ((config->mode & ETM_MODE_RETURNSTACK) &&
363 					(drvdata->retstack == true))
364 		config->cfg |= TRCCONFIGR_RS;
365 	else
366 		config->cfg &= ~TRCCONFIGR_RS;
367 
368 	/* bits[14:13], Q element enable field */
369 	mode = ETM_MODE_QELEM(config->mode);
370 	/* start by clearing QE bits */
371 	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
372 	/*
373 	 * if supported, Q elements with instruction counts are enabled.
374 	 * Always set the low bit for any requested mode. Valid combos are
375 	 * 0b00, 0b01 and 0b11.
376 	 */
377 	if (mode && drvdata->q_support)
378 		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
379 	/*
380 	 * if supported, Q elements with and without instruction
381 	 * counts are enabled
382 	 */
383 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
384 		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
385 
386 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
387 	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
388 	    (drvdata->atbtrig == true))
389 		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
390 	else
391 		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
392 
393 	/* bit[12], Low-power state behavior override bit */
394 	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
395 	    (drvdata->lpoverride == true))
396 		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
397 	else
398 		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
399 
400 	/* bit[8], Instruction stall bit */
401 	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
402 		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
403 	else
404 		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
405 
406 	/* bit[10], Prioritize instruction trace bit */
407 	if (config->mode & ETM_MODE_INSTPRIO)
408 		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
409 	else
410 		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
411 
412 	/* bit[13], Trace overflow prevention bit */
413 	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
414 		(drvdata->nooverflow == true))
415 		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
416 	else
417 		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
418 
419 	/* bit[9] Start/stop logic control bit */
420 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
421 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
422 	else
423 		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
424 
425 	/* bit[10], Whether a trace unit must trace a Reset exception */
426 	if (config->mode & ETM_MODE_TRACE_RESET)
427 		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
428 	else
429 		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
430 
431 	/* bit[11], Whether a trace unit must trace a system error exception */
432 	if ((config->mode & ETM_MODE_TRACE_ERR) &&
433 		(drvdata->trc_error == true))
434 		config->vinst_ctrl |= TRCVICTLR_TRCERR;
435 	else
436 		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
437 
438 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
439 		etm4_config_trace_mode(config);
440 
441 	raw_spin_unlock(&drvdata->spinlock);
442 
443 	return size;
444 }
445 static DEVICE_ATTR_RW(mode);
446 
447 static ssize_t pe_show(struct device *dev,
448 		       struct device_attribute *attr,
449 		       char *buf)
450 {
451 	unsigned long val;
452 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
453 	struct etmv4_config *config = &drvdata->config;
454 
455 	val = config->pe_sel;
456 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
457 }
458 
459 static ssize_t pe_store(struct device *dev,
460 			struct device_attribute *attr,
461 			const char *buf, size_t size)
462 {
463 	unsigned long val;
464 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
465 	struct etmv4_config *config = &drvdata->config;
466 
467 	if (kstrtoul(buf, 16, &val))
468 		return -EINVAL;
469 
470 	raw_spin_lock(&drvdata->spinlock);
471 	if (val > drvdata->nr_pe) {
472 		raw_spin_unlock(&drvdata->spinlock);
473 		return -EINVAL;
474 	}
475 
476 	config->pe_sel = val;
477 	raw_spin_unlock(&drvdata->spinlock);
478 	return size;
479 }
480 static DEVICE_ATTR_RW(pe);
481 
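/*
 * 'event' programs the TRCEVENTCTL0R event selectors. The written value is
 * masked according to drvdata->nr_event so that only the implemented
 * EVENT0..EVENT3 byte fields are retained.
 */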
482 static ssize_t event_show(struct device *dev,
483 			  struct device_attribute *attr,
484 			  char *buf)
485 {
486 	unsigned long val;
487 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
488 	struct etmv4_config *config = &drvdata->config;
489 
490 	val = config->eventctrl0;
491 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
492 }
493 
494 static ssize_t event_store(struct device *dev,
495 			   struct device_attribute *attr,
496 			   const char *buf, size_t size)
497 {
498 	unsigned long val;
499 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
500 	struct etmv4_config *config = &drvdata->config;
501 
502 	if (kstrtoul(buf, 16, &val))
503 		return -EINVAL;
504 
505 	raw_spin_lock(&drvdata->spinlock);
506 	switch (drvdata->nr_event) {
507 	case 0x0:
508 		/* EVENT0, bits[7:0] */
509 		config->eventctrl0 = val & 0xFF;
510 		break;
511 	case 0x1:
512 		 /* EVENT1, bits[15:8] */
513 		config->eventctrl0 = val & 0xFFFF;
514 		break;
515 	case 0x2:
516 		/* EVENT2, bits[23:16] */
517 		config->eventctrl0 = val & 0xFFFFFF;
518 		break;
519 	case 0x3:
520 		/* EVENT3, bits[31:24] */
521 		config->eventctrl0 = val;
522 		break;
523 	default:
524 		break;
525 	}
526 	raw_spin_unlock(&drvdata->spinlock);
527 	return size;
528 }
529 static DEVICE_ATTR_RW(event);
530 
531 static ssize_t event_instren_show(struct device *dev,
532 				  struct device_attribute *attr,
533 				  char *buf)
534 {
535 	unsigned long val;
536 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
537 	struct etmv4_config *config = &drvdata->config;
538 
539 	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
540 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
541 }
542 
543 static ssize_t event_instren_store(struct device *dev,
544 				   struct device_attribute *attr,
545 				   const char *buf, size_t size)
546 {
547 	unsigned long val;
548 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
549 	struct etmv4_config *config = &drvdata->config;
550 
551 	if (kstrtoul(buf, 16, &val))
552 		return -EINVAL;
553 
554 	raw_spin_lock(&drvdata->spinlock);
555 	/* start by clearing all instruction event enable bits */
556 	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
557 	switch (drvdata->nr_event) {
558 	case 0x0:
559 		/* generate Event element for event 1 */
560 		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
561 		break;
562 	case 0x1:
563 		/* generate Event element for event 1 and 2 */
564 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
565 		break;
566 	case 0x2:
567 		/* generate Event element for event 1, 2 and 3 */
568 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
569 					     TRCEVENTCTL1R_INSTEN_1 |
570 					     TRCEVENTCTL1R_INSTEN_2);
571 		break;
572 	case 0x3:
573 		/* generate Event element for all 4 events */
574 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
575 					     TRCEVENTCTL1R_INSTEN_1 |
576 					     TRCEVENTCTL1R_INSTEN_2 |
577 					     TRCEVENTCTL1R_INSTEN_3);
578 		break;
579 	default:
580 		break;
581 	}
582 	raw_spin_unlock(&drvdata->spinlock);
583 	return size;
584 }
585 static DEVICE_ATTR_RW(event_instren);
586 
587 static ssize_t event_ts_show(struct device *dev,
588 			     struct device_attribute *attr,
589 			     char *buf)
590 {
591 	unsigned long val;
592 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
593 	struct etmv4_config *config = &drvdata->config;
594 
595 	val = config->ts_ctrl;
596 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
597 }
598 
599 static ssize_t event_ts_store(struct device *dev,
600 			      struct device_attribute *attr,
601 			      const char *buf, size_t size)
602 {
603 	unsigned long val;
604 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
605 	struct etmv4_config *config = &drvdata->config;
606 
607 	if (kstrtoul(buf, 16, &val))
608 		return -EINVAL;
609 	if (!drvdata->ts_size)
610 		return -EINVAL;
611 
612 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
613 	return size;
614 }
615 static DEVICE_ATTR_RW(event_ts);
616 
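/*
 * 'syncfreq' sets the trace synchronization period; the stored value is the
 * power of two of the period in bytes (0x8 => 2^8 = 256 bytes, the reset
 * default above). Writes are rejected when drvdata->syncpr indicates the
 * period is fixed by the hardware.
 */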
617 static ssize_t syncfreq_show(struct device *dev,
618 			     struct device_attribute *attr,
619 			     char *buf)
620 {
621 	unsigned long val;
622 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
623 	struct etmv4_config *config = &drvdata->config;
624 
625 	val = config->syncfreq;
626 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
627 }
628 
629 static ssize_t syncfreq_store(struct device *dev,
630 			      struct device_attribute *attr,
631 			      const char *buf, size_t size)
632 {
633 	unsigned long val;
634 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
635 	struct etmv4_config *config = &drvdata->config;
636 
637 	if (kstrtoul(buf, 16, &val))
638 		return -EINVAL;
639 	if (drvdata->syncpr == true)
640 		return -EINVAL;
641 
642 	config->syncfreq = val & ETMv4_SYNC_MASK;
643 	return size;
644 }
645 static DEVICE_ATTR_RW(syncfreq);
646 
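/*
 * 'cyc_threshold' sets the cycle count threshold in config->ccctlr. The
 * value is masked with ETM_CYC_THRESHOLD_MASK and must not be below the
 * hardware minimum reported in drvdata->ccitmin.
 */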
647 static ssize_t cyc_threshold_show(struct device *dev,
648 				  struct device_attribute *attr,
649 				  char *buf)
650 {
651 	unsigned long val;
652 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
653 	struct etmv4_config *config = &drvdata->config;
654 
655 	val = config->ccctlr;
656 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
657 }
658 
659 static ssize_t cyc_threshold_store(struct device *dev,
660 				   struct device_attribute *attr,
661 				   const char *buf, size_t size)
662 {
663 	unsigned long val;
664 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
665 	struct etmv4_config *config = &drvdata->config;
666 
667 	if (kstrtoul(buf, 16, &val))
668 		return -EINVAL;
669 
670 	/* mask off max threshold before checking min value */
671 	val &= ETM_CYC_THRESHOLD_MASK;
672 	if (val < drvdata->ccitmin)
673 		return -EINVAL;
674 
675 	config->ccctlr = val;
676 	return size;
677 }
678 static DEVICE_ATTR_RW(cyc_threshold);
679 
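/*
 * 'bb_ctrl' programs branch broadcast control (config->bb_ctrl): bit[8]
 * selects include(1)/exclude(0) mode and bits[7:0] pick the address range
 * comparators it applies to; see the validity checks in the store function
 * below. Illustrative usage, enabling include mode on range comparator
 * pair 0:
 *
 *   echo 0x101 > bb_ctrl
 */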
680 static ssize_t bb_ctrl_show(struct device *dev,
681 			    struct device_attribute *attr,
682 			    char *buf)
683 {
684 	unsigned long val;
685 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
686 	struct etmv4_config *config = &drvdata->config;
687 
688 	val = config->bb_ctrl;
689 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
690 }
691 
692 static ssize_t bb_ctrl_store(struct device *dev,
693 			     struct device_attribute *attr,
694 			     const char *buf, size_t size)
695 {
696 	unsigned long val;
697 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
698 	struct etmv4_config *config = &drvdata->config;
699 
700 	if (kstrtoul(buf, 16, &val))
701 		return -EINVAL;
702 	if (drvdata->trcbb == false)
703 		return -EINVAL;
704 	if (!drvdata->nr_addr_cmp)
705 		return -EINVAL;
706 
707 	/*
708 	 * Bit[8] controls include(1) / exclude(0), bits[7:0] select
709 	 * individual range comparators. If include is chosen, at least
710 	 * one range must be selected.
711 	 */
712 	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
713 		return -EINVAL;
714 
715 	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
716 	return size;
717 }
718 static DEVICE_ATTR_RW(bb_ctrl);
719 
720 static ssize_t event_vinst_show(struct device *dev,
721 				struct device_attribute *attr,
722 				char *buf)
723 {
724 	unsigned long val;
725 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
726 	struct etmv4_config *config = &drvdata->config;
727 
728 	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
729 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
730 }
731 
732 static ssize_t event_vinst_store(struct device *dev,
733 				 struct device_attribute *attr,
734 				 const char *buf, size_t size)
735 {
736 	unsigned long val;
737 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
738 	struct etmv4_config *config = &drvdata->config;
739 
740 	if (kstrtoul(buf, 16, &val))
741 		return -EINVAL;
742 
743 	raw_spin_lock(&drvdata->spinlock);
744 	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
745 	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
746 	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
747 	raw_spin_unlock(&drvdata->spinlock);
748 	return size;
749 }
750 static DEVICE_ATTR_RW(event_vinst);
751 
752 static ssize_t s_exlevel_vinst_show(struct device *dev,
753 				    struct device_attribute *attr,
754 				    char *buf)
755 {
756 	unsigned long val;
757 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
758 	struct etmv4_config *config = &drvdata->config;
759 
760 	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
761 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
762 }
763 
764 static ssize_t s_exlevel_vinst_store(struct device *dev,
765 				     struct device_attribute *attr,
766 				     const char *buf, size_t size)
767 {
768 	unsigned long val;
769 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
770 	struct etmv4_config *config = &drvdata->config;
771 
772 	if (kstrtoul(buf, 16, &val))
773 		return -EINVAL;
774 
775 	raw_spin_lock(&drvdata->spinlock);
776 	/* clear all EXLEVEL_S bits  */
777 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
778 	/* enable instruction tracing for corresponding exception level */
779 	val &= drvdata->s_ex_level;
780 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
781 	raw_spin_unlock(&drvdata->spinlock);
782 	return size;
783 }
784 static DEVICE_ATTR_RW(s_exlevel_vinst);
785 
786 static ssize_t ns_exlevel_vinst_show(struct device *dev,
787 				     struct device_attribute *attr,
788 				     char *buf)
789 {
790 	unsigned long val;
791 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
792 	struct etmv4_config *config = &drvdata->config;
793 
794 	/* EXLEVEL_NS, bits[23:20] */
795 	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
796 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
797 }
798 
799 static ssize_t ns_exlevel_vinst_store(struct device *dev,
800 				      struct device_attribute *attr,
801 				      const char *buf, size_t size)
802 {
803 	unsigned long val;
804 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
805 	struct etmv4_config *config = &drvdata->config;
806 
807 	if (kstrtoul(buf, 16, &val))
808 		return -EINVAL;
809 
810 	raw_spin_lock(&drvdata->spinlock);
811 	/* clear EXLEVEL_NS bits  */
812 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
813 	/* enable instruction tracing for corresponding exception level */
814 	val &= drvdata->ns_ex_level;
815 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
816 	raw_spin_unlock(&drvdata->spinlock);
817 	return size;
818 }
819 static DEVICE_ATTR_RW(ns_exlevel_vinst);
820 
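/*
 * 'addr_idx' selects which address comparator the other addr_* attributes
 * below operate on; valid indices run from 0 to (2 * nr_addr_cmp) - 1.
 * Illustrative sequence programming a range on comparator pair 0/1 (the
 * address values are examples only):
 *
 *   echo 0 > addr_idx
 *   echo 0xffff000008080000 0xffff0000080a0000 > addr_range
 */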
821 static ssize_t addr_idx_show(struct device *dev,
822 			     struct device_attribute *attr,
823 			     char *buf)
824 {
825 	unsigned long val;
826 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
827 	struct etmv4_config *config = &drvdata->config;
828 
829 	val = config->addr_idx;
830 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
831 }
832 
833 static ssize_t addr_idx_store(struct device *dev,
834 			      struct device_attribute *attr,
835 			      const char *buf, size_t size)
836 {
837 	unsigned long val;
838 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
839 	struct etmv4_config *config = &drvdata->config;
840 
841 	if (kstrtoul(buf, 16, &val))
842 		return -EINVAL;
843 	if (val >= drvdata->nr_addr_cmp * 2)
844 		return -EINVAL;
845 
846 	/*
847 	 * Use spinlock to ensure index doesn't change while it gets
848 	 * dereferenced multiple times within a spinlock block elsewhere.
849 	 */
850 	raw_spin_lock(&drvdata->spinlock);
851 	config->addr_idx = val;
852 	raw_spin_unlock(&drvdata->spinlock);
853 	return size;
854 }
855 static DEVICE_ATTR_RW(addr_idx);
856 
857 static ssize_t addr_instdatatype_show(struct device *dev,
858 				      struct device_attribute *attr,
859 				      char *buf)
860 {
861 	ssize_t len;
862 	u8 val, idx;
863 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
864 	struct etmv4_config *config = &drvdata->config;
865 
866 	raw_spin_lock(&drvdata->spinlock);
867 	idx = config->addr_idx;
868 	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
869 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
870 			val == TRCACATRn_TYPE_ADDR ? "instr" :
871 			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
872 			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
873 			"data_load_store")));
874 	raw_spin_unlock(&drvdata->spinlock);
875 	return len;
876 }
877 
878 static ssize_t addr_instdatatype_store(struct device *dev,
879 				       struct device_attribute *attr,
880 				       const char *buf, size_t size)
881 {
882 	u8 idx;
883 	char str[20] = "";
884 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
885 	struct etmv4_config *config = &drvdata->config;
886 
887 	if (strlen(buf) >= 20)
888 		return -EINVAL;
889 	if (sscanf(buf, "%s", str) != 1)
890 		return -EINVAL;
891 
892 	raw_spin_lock(&drvdata->spinlock);
893 	idx = config->addr_idx;
894 	if (!strcmp(str, "instr"))
895 		/* TYPE, bits[1:0] */
896 		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
897 
898 	raw_spin_unlock(&drvdata->spinlock);
899 	return size;
900 }
901 static DEVICE_ATTR_RW(addr_instdatatype);
902 
903 static ssize_t addr_single_show(struct device *dev,
904 				struct device_attribute *attr,
905 				char *buf)
906 {
907 	u8 idx;
908 	unsigned long val;
909 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
910 	struct etmv4_config *config = &drvdata->config;
911 
912 	idx = config->addr_idx;
913 	raw_spin_lock(&drvdata->spinlock);
914 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
915 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
916 		raw_spin_unlock(&drvdata->spinlock);
917 		return -EPERM;
918 	}
919 	val = (unsigned long)config->addr_val[idx];
920 	raw_spin_unlock(&drvdata->spinlock);
921 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
922 }
923 
924 static ssize_t addr_single_store(struct device *dev,
925 				 struct device_attribute *attr,
926 				 const char *buf, size_t size)
927 {
928 	u8 idx;
929 	unsigned long val;
930 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
931 	struct etmv4_config *config = &drvdata->config;
932 
933 	if (kstrtoul(buf, 16, &val))
934 		return -EINVAL;
935 
936 	raw_spin_lock(&drvdata->spinlock);
937 	idx = config->addr_idx;
938 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
939 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
940 		raw_spin_unlock(&drvdata->spinlock);
941 		return -EPERM;
942 	}
943 
944 	config->addr_val[idx] = (u64)val;
945 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
946 	raw_spin_unlock(&drvdata->spinlock);
947 	return size;
948 }
949 static DEVICE_ATTR_RW(addr_single);
950 
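/*
 * 'addr_range' reads or programs the comparator pair selected by addr_idx.
 * Writes take "start stop [exclude]" in hex, where start must not be
 * greater than stop and the optional third field overrides the
 * include/exclude behaviour otherwise taken from 'mode'; addr_idx must
 * point at the even (low) comparator of the pair.
 */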
951 static ssize_t addr_range_show(struct device *dev,
952 			       struct device_attribute *attr,
953 			       char *buf)
954 {
955 	u8 idx;
956 	unsigned long val1, val2;
957 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
958 	struct etmv4_config *config = &drvdata->config;
959 
960 	raw_spin_lock(&drvdata->spinlock);
961 	idx = config->addr_idx;
962 	if (idx % 2 != 0) {
963 		raw_spin_unlock(&drvdata->spinlock);
964 		return -EPERM;
965 	}
966 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
967 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
968 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
969 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
970 		raw_spin_unlock(&drvdata->spinlock);
971 		return -EPERM;
972 	}
973 
974 	val1 = (unsigned long)config->addr_val[idx];
975 	val2 = (unsigned long)config->addr_val[idx + 1];
976 	raw_spin_unlock(&drvdata->spinlock);
977 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
978 }
979 
980 static ssize_t addr_range_store(struct device *dev,
981 				struct device_attribute *attr,
982 				const char *buf, size_t size)
983 {
984 	u8 idx;
985 	unsigned long val1, val2;
986 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
987 	struct etmv4_config *config = &drvdata->config;
988 	int elements, exclude;
989 
990 	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
991 
992 	/* exclude is optional, but at least two parameters are needed */
993 	if (elements < 2)
994 		return -EINVAL;
995 	/* lower address comparator cannot have a higher address value */
996 	if (val1 > val2)
997 		return -EINVAL;
998 
999 	raw_spin_lock(&drvdata->spinlock);
1000 	idx = config->addr_idx;
1001 	if (idx % 2 != 0) {
1002 		raw_spin_unlock(&drvdata->spinlock);
1003 		return -EPERM;
1004 	}
1005 
1006 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1007 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1008 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1009 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1010 		raw_spin_unlock(&drvdata->spinlock);
1011 		return -EPERM;
1012 	}
1013 
1014 	config->addr_val[idx] = (u64)val1;
1015 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1016 	config->addr_val[idx + 1] = (u64)val2;
1017 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1018 	/*
1019 	 * Program include or exclude control bits for vinst or vdata
1020 	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE.
1021 	 * Use the supplied value, or default to the bit set in 'mode'.
1022 	 */
1023 	if (elements != 3)
1024 		exclude = config->mode & ETM_MODE_EXCLUDE;
1025 	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1026 
1027 	raw_spin_unlock(&drvdata->spinlock);
1028 	return size;
1029 }
1030 static DEVICE_ATTR_RW(addr_range);
1031 
1032 static ssize_t addr_start_show(struct device *dev,
1033 			       struct device_attribute *attr,
1034 			       char *buf)
1035 {
1036 	u8 idx;
1037 	unsigned long val;
1038 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1039 	struct etmv4_config *config = &drvdata->config;
1040 
1041 	raw_spin_lock(&drvdata->spinlock);
1042 	idx = config->addr_idx;
1043 
1044 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1045 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1046 		raw_spin_unlock(&drvdata->spinlock);
1047 		return -EPERM;
1048 	}
1049 
1050 	val = (unsigned long)config->addr_val[idx];
1051 	raw_spin_unlock(&drvdata->spinlock);
1052 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1053 }
1054 
1055 static ssize_t addr_start_store(struct device *dev,
1056 				struct device_attribute *attr,
1057 				const char *buf, size_t size)
1058 {
1059 	u8 idx;
1060 	unsigned long val;
1061 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1062 	struct etmv4_config *config = &drvdata->config;
1063 
1064 	if (kstrtoul(buf, 16, &val))
1065 		return -EINVAL;
1066 
1067 	raw_spin_lock(&drvdata->spinlock);
1068 	idx = config->addr_idx;
1069 	if (!drvdata->nr_addr_cmp) {
1070 		raw_spin_unlock(&drvdata->spinlock);
1071 		return -EINVAL;
1072 	}
1073 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1074 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1075 		raw_spin_unlock(&drvdata->spinlock);
1076 		return -EPERM;
1077 	}
1078 
1079 	config->addr_val[idx] = (u64)val;
1080 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1081 	config->vissctlr |= BIT(idx);
1082 	raw_spin_unlock(&drvdata->spinlock);
1083 	return size;
1084 }
1085 static DEVICE_ATTR_RW(addr_start);
1086 
1087 static ssize_t addr_stop_show(struct device *dev,
1088 			      struct device_attribute *attr,
1089 			      char *buf)
1090 {
1091 	u8 idx;
1092 	unsigned long val;
1093 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094 	struct etmv4_config *config = &drvdata->config;
1095 
1096 	raw_spin_lock(&drvdata->spinlock);
1097 	idx = config->addr_idx;
1098 
1099 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1100 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1101 		raw_spin_unlock(&drvdata->spinlock);
1102 		return -EPERM;
1103 	}
1104 
1105 	val = (unsigned long)config->addr_val[idx];
1106 	raw_spin_unlock(&drvdata->spinlock);
1107 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1108 }
1109 
1110 static ssize_t addr_stop_store(struct device *dev,
1111 			       struct device_attribute *attr,
1112 			       const char *buf, size_t size)
1113 {
1114 	u8 idx;
1115 	unsigned long val;
1116 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1117 	struct etmv4_config *config = &drvdata->config;
1118 
1119 	if (kstrtoul(buf, 16, &val))
1120 		return -EINVAL;
1121 
1122 	raw_spin_lock(&drvdata->spinlock);
1123 	idx = config->addr_idx;
1124 	if (!drvdata->nr_addr_cmp) {
1125 		raw_spin_unlock(&drvdata->spinlock);
1126 		return -EINVAL;
1127 	}
1128 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1129 	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1130 		raw_spin_unlock(&drvdata->spinlock);
1131 		return -EPERM;
1132 	}
1133 
1134 	config->addr_val[idx] = (u64)val;
1135 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1136 	config->vissctlr |= BIT(idx + 16);
1137 	raw_spin_unlock(&drvdata->spinlock);
1138 	return size;
1139 }
1140 static DEVICE_ATTR_RW(addr_stop);
1141 
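/*
 * 'addr_ctxtype' sets the CONTEXTTYPE field of the selected comparator's
 * TRCACATRn. Accepted strings are "none", "ctxid", "vmid" and "all";
 * "ctxid"/"vmid"/"all" only take effect when the hardware implements
 * Context ID / VMID comparators (drvdata->numcidc / drvdata->numvmidc).
 */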
1142 static ssize_t addr_ctxtype_show(struct device *dev,
1143 				 struct device_attribute *attr,
1144 				 char *buf)
1145 {
1146 	ssize_t len;
1147 	u8 idx, val;
1148 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1149 	struct etmv4_config *config = &drvdata->config;
1150 
1151 	raw_spin_lock(&drvdata->spinlock);
1152 	idx = config->addr_idx;
1153 	/* CONTEXTTYPE, bits[3:2] */
1154 	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1155 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1156 			(val == ETM_CTX_CTXID ? "ctxid" :
1157 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1158 	raw_spin_unlock(&drvdata->spinlock);
1159 	return len;
1160 }
1161 
1162 static ssize_t addr_ctxtype_store(struct device *dev,
1163 				  struct device_attribute *attr,
1164 				  const char *buf, size_t size)
1165 {
1166 	u8 idx;
1167 	char str[10] = "";
1168 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1169 	struct etmv4_config *config = &drvdata->config;
1170 
1171 	if (strlen(buf) >= 10)
1172 		return -EINVAL;
1173 	if (sscanf(buf, "%s", str) != 1)
1174 		return -EINVAL;
1175 
1176 	raw_spin_lock(&drvdata->spinlock);
1177 	idx = config->addr_idx;
1178 	if (!strcmp(str, "none"))
1179 		/* start by clearing context type bits */
1180 		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1181 	else if (!strcmp(str, "ctxid")) {
1182 		/* 0b01 The trace unit performs a Context ID */
1183 		if (drvdata->numcidc) {
1184 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1185 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1186 		}
1187 	} else if (!strcmp(str, "vmid")) {
1188 		/* 0b10 The trace unit performs a VMID */
1189 		if (drvdata->numvmidc) {
1190 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1191 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1192 		}
1193 	} else if (!strcmp(str, "all")) {
1194 		/*
1195 		 * 0b11 The trace unit performs a Context ID
1196 		 * comparison and a VMID
1197 		 */
1198 		if (drvdata->numcidc)
1199 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1200 		if (drvdata->numvmidc)
1201 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1202 	}
1203 	raw_spin_unlock(&drvdata->spinlock);
1204 	return size;
1205 }
1206 static DEVICE_ATTR_RW(addr_ctxtype);
1207 
1208 static ssize_t addr_context_show(struct device *dev,
1209 				 struct device_attribute *attr,
1210 				 char *buf)
1211 {
1212 	u8 idx;
1213 	unsigned long val;
1214 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1215 	struct etmv4_config *config = &drvdata->config;
1216 
1217 	raw_spin_lock(&drvdata->spinlock);
1218 	idx = config->addr_idx;
1219 	/* context ID comparator bits[6:4] */
1220 	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1221 	raw_spin_unlock(&drvdata->spinlock);
1222 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1223 }
1224 
1225 static ssize_t addr_context_store(struct device *dev,
1226 				  struct device_attribute *attr,
1227 				  const char *buf, size_t size)
1228 {
1229 	u8 idx;
1230 	unsigned long val;
1231 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1232 	struct etmv4_config *config = &drvdata->config;
1233 
1234 	if (kstrtoul(buf, 16, &val))
1235 		return -EINVAL;
1236 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1237 		return -EINVAL;
1238 	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1239 		     drvdata->numcidc : drvdata->numvmidc))
1240 		return -EINVAL;
1241 
1242 	raw_spin_lock(&drvdata->spinlock);
1243 	idx = config->addr_idx;
1244 	/* clear context ID comparator bits[6:4] */
1245 	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1246 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1247 	raw_spin_unlock(&drvdata->spinlock);
1248 	return size;
1249 }
1250 static DEVICE_ATTR_RW(addr_context);
1251 
1252 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1253 				      struct device_attribute *attr,
1254 				      char *buf)
1255 {
1256 	u8 idx;
1257 	unsigned long val;
1258 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1259 	struct etmv4_config *config = &drvdata->config;
1260 
1261 	raw_spin_lock(&drvdata->spinlock);
1262 	idx = config->addr_idx;
1263 	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1264 	raw_spin_unlock(&drvdata->spinlock);
1265 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1266 }
1267 
1268 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1269 				       struct device_attribute *attr,
1270 				       const char *buf, size_t size)
1271 {
1272 	u8 idx;
1273 	unsigned long val;
1274 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1275 	struct etmv4_config *config = &drvdata->config;
1276 
1277 	if (kstrtoul(buf, 0, &val))
1278 		return -EINVAL;
1279 
1280 	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1281 		return -EINVAL;
1282 
1283 	raw_spin_lock(&drvdata->spinlock);
1284 	idx = config->addr_idx;
1285 	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1286 	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1287 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1288 	raw_spin_unlock(&drvdata->spinlock);
1289 	return size;
1290 }
1291 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1292 
1293 static const char * const addr_type_names[] = {
1294 	"unused",
1295 	"single",
1296 	"range",
1297 	"start",
1298 	"stop"
1299 };
1300 
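/*
 * 'addr_cmp_view' is a read-only summary of the comparator currently
 * selected by addr_idx: its type, programmed address value(s), the
 * include/exclude setting for range pairs, and the raw TRCACATRn control
 * value.
 */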
1301 static ssize_t addr_cmp_view_show(struct device *dev,
1302 				  struct device_attribute *attr, char *buf)
1303 {
1304 	u8 idx, addr_type;
1305 	unsigned long addr_v, addr_v2, addr_ctrl;
1306 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1307 	struct etmv4_config *config = &drvdata->config;
1308 	int size = 0;
1309 	bool exclude = false;
1310 
1311 	raw_spin_lock(&drvdata->spinlock);
1312 	idx = config->addr_idx;
1313 	addr_v = config->addr_val[idx];
1314 	addr_ctrl = config->addr_acc[idx];
1315 	addr_type = config->addr_type[idx];
1316 	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1317 		if (idx & 0x1) {
1318 			idx -= 1;
1319 			addr_v2 = addr_v;
1320 			addr_v = config->addr_val[idx];
1321 		} else {
1322 			addr_v2 = config->addr_val[idx + 1];
1323 		}
1324 		exclude = config->viiectlr & BIT(idx / 2 + 16);
1325 	}
1326 	raw_spin_unlock(&drvdata->spinlock);
1327 	if (addr_type) {
1328 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1329 				 addr_type_names[addr_type], addr_v);
1330 		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1331 			size += scnprintf(buf + size, PAGE_SIZE - size,
1332 					  " %#lx %s", addr_v2,
1333 					  exclude ? "exclude" : "include");
1334 		}
1335 		size += scnprintf(buf + size, PAGE_SIZE - size,
1336 				  " ctrl(%#lx)\n", addr_ctrl);
1337 	} else {
1338 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1339 	}
1340 	return size;
1341 }
1342 static DEVICE_ATTR_RO(addr_cmp_view);
1343 
1344 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1345 					    struct device_attribute *attr,
1346 					    char *buf)
1347 {
1348 	unsigned long val;
1349 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1350 	struct etmv4_config *config = &drvdata->config;
1351 
1352 	if (!drvdata->nr_pe_cmp)
1353 		return -EINVAL;
1354 	val = config->vipcssctlr;
1355 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1356 }
1357 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1358 					     struct device_attribute *attr,
1359 					     const char *buf, size_t size)
1360 {
1361 	unsigned long val;
1362 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1363 	struct etmv4_config *config = &drvdata->config;
1364 
1365 	if (kstrtoul(buf, 16, &val))
1366 		return -EINVAL;
1367 	if (!drvdata->nr_pe_cmp)
1368 		return -EINVAL;
1369 
1370 	raw_spin_lock(&drvdata->spinlock);
1371 	config->vipcssctlr = val;
1372 	raw_spin_unlock(&drvdata->spinlock);
1373 	return size;
1374 }
1375 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1376 
1377 static ssize_t seq_idx_show(struct device *dev,
1378 			    struct device_attribute *attr,
1379 			    char *buf)
1380 {
1381 	unsigned long val;
1382 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1383 	struct etmv4_config *config = &drvdata->config;
1384 
1385 	val = config->seq_idx;
1386 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1387 }
1388 
1389 static ssize_t seq_idx_store(struct device *dev,
1390 			     struct device_attribute *attr,
1391 			     const char *buf, size_t size)
1392 {
1393 	unsigned long val;
1394 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1395 	struct etmv4_config *config = &drvdata->config;
1396 
1397 	if (kstrtoul(buf, 16, &val))
1398 		return -EINVAL;
1399 	if (val >= drvdata->nrseqstate - 1)
1400 		return -EINVAL;
1401 
1402 	/*
1403 	 * Use spinlock to ensure index doesn't change while it gets
1404 	 * dereferenced multiple times within a spinlock block elsewhere.
1405 	 */
1406 	raw_spin_lock(&drvdata->spinlock);
1407 	config->seq_idx = val;
1408 	raw_spin_unlock(&drvdata->spinlock);
1409 	return size;
1410 }
1411 static DEVICE_ATTR_RW(seq_idx);
1412 
1413 static ssize_t seq_state_show(struct device *dev,
1414 			      struct device_attribute *attr,
1415 			      char *buf)
1416 {
1417 	unsigned long val;
1418 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1419 	struct etmv4_config *config = &drvdata->config;
1420 
1421 	val = config->seq_state;
1422 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1423 }
1424 
1425 static ssize_t seq_state_store(struct device *dev,
1426 			       struct device_attribute *attr,
1427 			       const char *buf, size_t size)
1428 {
1429 	unsigned long val;
1430 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1431 	struct etmv4_config *config = &drvdata->config;
1432 
1433 	if (kstrtoul(buf, 16, &val))
1434 		return -EINVAL;
1435 	if (val >= drvdata->nrseqstate)
1436 		return -EINVAL;
1437 
1438 	config->seq_state = val;
1439 	return size;
1440 }
1441 static DEVICE_ATTR_RW(seq_state);
1442 
1443 static ssize_t seq_event_show(struct device *dev,
1444 			      struct device_attribute *attr,
1445 			      char *buf)
1446 {
1447 	u8 idx;
1448 	unsigned long val;
1449 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1450 	struct etmv4_config *config = &drvdata->config;
1451 
1452 	raw_spin_lock(&drvdata->spinlock);
1453 	idx = config->seq_idx;
1454 	val = config->seq_ctrl[idx];
1455 	raw_spin_unlock(&drvdata->spinlock);
1456 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1457 }
1458 
1459 static ssize_t seq_event_store(struct device *dev,
1460 			       struct device_attribute *attr,
1461 			       const char *buf, size_t size)
1462 {
1463 	u8 idx;
1464 	unsigned long val;
1465 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1466 	struct etmv4_config *config = &drvdata->config;
1467 
1468 	if (kstrtoul(buf, 16, &val))
1469 		return -EINVAL;
1470 
1471 	raw_spin_lock(&drvdata->spinlock);
1472 	idx = config->seq_idx;
1473 	/* Seq control has two masks B[15:8] F[7:0] */
1474 	config->seq_ctrl[idx] = val & 0xFFFF;
1475 	raw_spin_unlock(&drvdata->spinlock);
1476 	return size;
1477 }
1478 static DEVICE_ATTR_RW(seq_event);
1479 
1480 static ssize_t seq_reset_event_show(struct device *dev,
1481 				    struct device_attribute *attr,
1482 				    char *buf)
1483 {
1484 	unsigned long val;
1485 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1486 	struct etmv4_config *config = &drvdata->config;
1487 
1488 	val = config->seq_rst;
1489 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1490 }
1491 
1492 static ssize_t seq_reset_event_store(struct device *dev,
1493 				     struct device_attribute *attr,
1494 				     const char *buf, size_t size)
1495 {
1496 	unsigned long val;
1497 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1498 	struct etmv4_config *config = &drvdata->config;
1499 
1500 	if (kstrtoul(buf, 16, &val))
1501 		return -EINVAL;
1502 	if (!(drvdata->nrseqstate))
1503 		return -EINVAL;
1504 
1505 	config->seq_rst = val & ETMv4_EVENT_MASK;
1506 	return size;
1507 }
1508 static DEVICE_ATTR_RW(seq_reset_event);
1509 
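/*
 * Counter attributes: 'cntr_idx' selects one of nr_cntr counters, and
 * 'cntrldvr', 'cntr_val' and 'cntr_ctrl' then access that counter's reload
 * value, current value and control register. Reload and current values are
 * limited to ETM_CNTR_MAX_VAL.
 */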
1510 static ssize_t cntr_idx_show(struct device *dev,
1511 			     struct device_attribute *attr,
1512 			     char *buf)
1513 {
1514 	unsigned long val;
1515 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1516 	struct etmv4_config *config = &drvdata->config;
1517 
1518 	val = config->cntr_idx;
1519 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1520 }
1521 
1522 static ssize_t cntr_idx_store(struct device *dev,
1523 			      struct device_attribute *attr,
1524 			      const char *buf, size_t size)
1525 {
1526 	unsigned long val;
1527 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1528 	struct etmv4_config *config = &drvdata->config;
1529 
1530 	if (kstrtoul(buf, 16, &val))
1531 		return -EINVAL;
1532 	if (val >= drvdata->nr_cntr)
1533 		return -EINVAL;
1534 
1535 	/*
1536 	 * Use spinlock to ensure index doesn't change while it gets
1537 	 * dereferenced multiple times within a spinlock block elsewhere.
1538 	 */
1539 	raw_spin_lock(&drvdata->spinlock);
1540 	config->cntr_idx = val;
1541 	raw_spin_unlock(&drvdata->spinlock);
1542 	return size;
1543 }
1544 static DEVICE_ATTR_RW(cntr_idx);
1545 
1546 static ssize_t cntrldvr_show(struct device *dev,
1547 			     struct device_attribute *attr,
1548 			     char *buf)
1549 {
1550 	u8 idx;
1551 	unsigned long val;
1552 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1553 	struct etmv4_config *config = &drvdata->config;
1554 
1555 	raw_spin_lock(&drvdata->spinlock);
1556 	idx = config->cntr_idx;
1557 	val = config->cntrldvr[idx];
1558 	raw_spin_unlock(&drvdata->spinlock);
1559 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1560 }
1561 
1562 static ssize_t cntrldvr_store(struct device *dev,
1563 			      struct device_attribute *attr,
1564 			      const char *buf, size_t size)
1565 {
1566 	u8 idx;
1567 	unsigned long val;
1568 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1569 	struct etmv4_config *config = &drvdata->config;
1570 
1571 	if (kstrtoul(buf, 16, &val))
1572 		return -EINVAL;
1573 	if (val > ETM_CNTR_MAX_VAL)
1574 		return -EINVAL;
1575 
1576 	raw_spin_lock(&drvdata->spinlock);
1577 	idx = config->cntr_idx;
1578 	config->cntrldvr[idx] = val;
1579 	raw_spin_unlock(&drvdata->spinlock);
1580 	return size;
1581 }
1582 static DEVICE_ATTR_RW(cntrldvr);
1583 
1584 static ssize_t cntr_val_show(struct device *dev,
1585 			     struct device_attribute *attr,
1586 			     char *buf)
1587 {
1588 	u8 idx;
1589 	unsigned long val;
1590 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1591 	struct etmv4_config *config = &drvdata->config;
1592 
1593 	raw_spin_lock(&drvdata->spinlock);
1594 	idx = config->cntr_idx;
1595 	val = config->cntr_val[idx];
1596 	raw_spin_unlock(&drvdata->spinlock);
1597 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1598 }
1599 
1600 static ssize_t cntr_val_store(struct device *dev,
1601 			      struct device_attribute *attr,
1602 			      const char *buf, size_t size)
1603 {
1604 	u8 idx;
1605 	unsigned long val;
1606 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1607 	struct etmv4_config *config = &drvdata->config;
1608 
1609 	if (kstrtoul(buf, 16, &val))
1610 		return -EINVAL;
1611 	if (val > ETM_CNTR_MAX_VAL)
1612 		return -EINVAL;
1613 
1614 	raw_spin_lock(&drvdata->spinlock);
1615 	idx = config->cntr_idx;
1616 	config->cntr_val[idx] = val;
1617 	raw_spin_unlock(&drvdata->spinlock);
1618 	return size;
1619 }
1620 static DEVICE_ATTR_RW(cntr_val);
1621 
1622 static ssize_t cntr_ctrl_show(struct device *dev,
1623 			      struct device_attribute *attr,
1624 			      char *buf)
1625 {
1626 	u8 idx;
1627 	unsigned long val;
1628 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1629 	struct etmv4_config *config = &drvdata->config;
1630 
1631 	raw_spin_lock(&drvdata->spinlock);
1632 	idx = config->cntr_idx;
1633 	val = config->cntr_ctrl[idx];
1634 	raw_spin_unlock(&drvdata->spinlock);
1635 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1636 }
1637 
1638 static ssize_t cntr_ctrl_store(struct device *dev,
1639 			       struct device_attribute *attr,
1640 			       const char *buf, size_t size)
1641 {
1642 	u8 idx;
1643 	unsigned long val;
1644 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1645 	struct etmv4_config *config = &drvdata->config;
1646 
1647 	if (kstrtoul(buf, 16, &val))
1648 		return -EINVAL;
1649 
1650 	raw_spin_lock(&drvdata->spinlock);
1651 	idx = config->cntr_idx;
1652 	config->cntr_ctrl[idx] = val;
1653 	raw_spin_unlock(&drvdata->spinlock);
1654 	return size;
1655 }
1656 static DEVICE_ATTR_RW(cntr_ctrl);
1657 
1658 static ssize_t res_idx_show(struct device *dev,
1659 			    struct device_attribute *attr,
1660 			    char *buf)
1661 {
1662 	unsigned long val;
1663 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1664 	struct etmv4_config *config = &drvdata->config;
1665 
1666 	val = config->res_idx;
1667 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1668 }
1669 
1670 static ssize_t res_idx_store(struct device *dev,
1671 			     struct device_attribute *attr,
1672 			     const char *buf, size_t size)
1673 {
1674 	unsigned long val;
1675 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1676 	struct etmv4_config *config = &drvdata->config;
1677 
1678 	if (kstrtoul(buf, 16, &val))
1679 		return -EINVAL;
1680 	/*
1681 	 * Resource selector pair 0 is always implemented and reserved,
1682 	 * so indexes 0 and 1 are invalid.
1683 	 */
1684 	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1685 		return -EINVAL;
1686 
1687 	/*
1688 	 * Use spinlock to ensure index doesn't change while it gets
1689 	 * dereferenced multiple times within a spinlock block elsewhere.
1690 	 */
1691 	raw_spin_lock(&drvdata->spinlock);
1692 	config->res_idx = val;
1693 	raw_spin_unlock(&drvdata->spinlock);
1694 	return size;
1695 }
1696 static DEVICE_ATTR_RW(res_idx);
1697 
1698 static ssize_t res_ctrl_show(struct device *dev,
1699 			     struct device_attribute *attr,
1700 			     char *buf)
1701 {
1702 	u8 idx;
1703 	unsigned long val;
1704 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1705 	struct etmv4_config *config = &drvdata->config;
1706 
1707 	raw_spin_lock(&drvdata->spinlock);
1708 	idx = config->res_idx;
1709 	val = config->res_ctrl[idx];
1710 	raw_spin_unlock(&drvdata->spinlock);
1711 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1712 }
1713 
1714 static ssize_t res_ctrl_store(struct device *dev,
1715 			      struct device_attribute *attr,
1716 			      const char *buf, size_t size)
1717 {
1718 	u8 idx;
1719 	unsigned long val;
1720 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1721 	struct etmv4_config *config = &drvdata->config;
1722 
1723 	if (kstrtoul(buf, 16, &val))
1724 		return -EINVAL;
1725 
1726 	raw_spin_lock(&drvdata->spinlock);
1727 	idx = config->res_idx;
1728 	/* For an odd idx, the pair inversion (PAIRINV) bit is RES0 */
1729 	if (idx % 2 != 0)
1730 		/* PAIRINV, bit[21] */
1731 		val &= ~TRCRSCTLRn_PAIRINV;
1732 	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1733 				       TRCRSCTLRn_INV |
1734 				       TRCRSCTLRn_GROUP_MASK |
1735 				       TRCRSCTLRn_SELECT_MASK);
1736 	raw_spin_unlock(&drvdata->spinlock);
1737 	return size;
1738 }
1739 static DEVICE_ATTR_RW(res_ctrl);
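/*
 * res_idx and res_ctrl use the same select-then-program pattern: res_idx
 * picks a resource selector (>= 2, since pair 0 is reserved) and res_ctrl
 * then takes a raw TRCRSCTLRn value, of which only the SELECT, GROUP, INV
 * and, for even selectors, PAIRINV fields are retained by the store above.
 */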
1740 
1741 static ssize_t sshot_idx_show(struct device *dev,
1742 			      struct device_attribute *attr, char *buf)
1743 {
1744 	unsigned long val;
1745 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1746 	struct etmv4_config *config = &drvdata->config;
1747 
1748 	val = config->ss_idx;
1749 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1750 }
1751 
1752 static ssize_t sshot_idx_store(struct device *dev,
1753 			       struct device_attribute *attr,
1754 			       const char *buf, size_t size)
1755 {
1756 	unsigned long val;
1757 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1758 	struct etmv4_config *config = &drvdata->config;
1759 
1760 	if (kstrtoul(buf, 16, &val))
1761 		return -EINVAL;
1762 	if (val >= drvdata->nr_ss_cmp)
1763 		return -EINVAL;
1764 
1765 	raw_spin_lock(&drvdata->spinlock);
1766 	config->ss_idx = val;
1767 	raw_spin_unlock(&drvdata->spinlock);
1768 	return size;
1769 }
1770 static DEVICE_ATTR_RW(sshot_idx);
1771 
1772 static ssize_t sshot_ctrl_show(struct device *dev,
1773 			       struct device_attribute *attr,
1774 			       char *buf)
1775 {
1776 	unsigned long val;
1777 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1778 	struct etmv4_config *config = &drvdata->config;
1779 
1780 	raw_spin_lock(&drvdata->spinlock);
1781 	val = config->ss_ctrl[config->ss_idx];
1782 	raw_spin_unlock(&drvdata->spinlock);
1783 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1784 }
1785 
1786 static ssize_t sshot_ctrl_store(struct device *dev,
1787 				struct device_attribute *attr,
1788 				const char *buf, size_t size)
1789 {
1790 	u8 idx;
1791 	unsigned long val;
1792 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1793 	struct etmv4_config *config = &drvdata->config;
1794 
1795 	if (kstrtoul(buf, 16, &val))
1796 		return -EINVAL;
1797 
1798 	raw_spin_lock(&drvdata->spinlock);
1799 	idx = config->ss_idx;
1800 	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1801 	/* must clear bit 31 in related status register on programming */
1802 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1803 	raw_spin_unlock(&drvdata->spinlock);
1804 	return size;
1805 }
1806 static DEVICE_ATTR_RW(sshot_ctrl);
1807 
1808 static ssize_t sshot_status_show(struct device *dev,
1809 				 struct device_attribute *attr, char *buf)
1810 {
1811 	unsigned long val;
1812 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1813 	struct etmv4_config *config = &drvdata->config;
1814 
1815 	raw_spin_lock(&drvdata->spinlock);
1816 	val = config->ss_status[config->ss_idx];
1817 	raw_spin_unlock(&drvdata->spinlock);
1818 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1819 }
1820 static DEVICE_ATTR_RO(sshot_status);
1821 
1822 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1823 				  struct device_attribute *attr,
1824 				  char *buf)
1825 {
1826 	unsigned long val;
1827 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1828 	struct etmv4_config *config = &drvdata->config;
1829 
1830 	raw_spin_lock(&drvdata->spinlock);
1831 	val = config->ss_pe_cmp[config->ss_idx];
1832 	raw_spin_unlock(&drvdata->spinlock);
1833 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1834 }
1835 
1836 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1837 				   struct device_attribute *attr,
1838 				   const char *buf, size_t size)
1839 {
1840 	u8 idx;
1841 	unsigned long val;
1842 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1843 	struct etmv4_config *config = &drvdata->config;
1844 
1845 	if (kstrtoul(buf, 16, &val))
1846 		return -EINVAL;
1847 
1848 	raw_spin_lock(&drvdata->spinlock);
1849 	idx = config->ss_idx;
1850 	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1851 	/* must clear bit 31 in related status register on programming */
1852 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1853 	raw_spin_unlock(&drvdata->spinlock);
1854 	return size;
1855 }
1856 static DEVICE_ATTR_RW(sshot_pe_ctrl);
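/*
 * Single-shot comparator programming follows the same pattern: sshot_idx
 * selects the comparator, sshot_ctrl and sshot_pe_ctrl program its control
 * registers (each write also clears the sticky bit 31 of the matching
 * status register), and sshot_status exposes that status register read-only.
 */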
1857 
1858 static ssize_t ctxid_idx_show(struct device *dev,
1859 			      struct device_attribute *attr,
1860 			      char *buf)
1861 {
1862 	unsigned long val;
1863 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1864 	struct etmv4_config *config = &drvdata->config;
1865 
1866 	val = config->ctxid_idx;
1867 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1868 }
1869 
1870 static ssize_t ctxid_idx_store(struct device *dev,
1871 			       struct device_attribute *attr,
1872 			       const char *buf, size_t size)
1873 {
1874 	unsigned long val;
1875 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1876 	struct etmv4_config *config = &drvdata->config;
1877 
1878 	if (kstrtoul(buf, 16, &val))
1879 		return -EINVAL;
1880 	if (val >= drvdata->numcidc)
1881 		return -EINVAL;
1882 
1883 	/*
1884 	 * Use spinlock to ensure index doesn't change while it gets
1885 	 * dereferenced multiple times within a spinlock block elsewhere.
1886 	 */
1887 	raw_spin_lock(&drvdata->spinlock);
1888 	config->ctxid_idx = val;
1889 	raw_spin_unlock(&drvdata->spinlock);
1890 	return size;
1891 }
1892 static DEVICE_ATTR_RW(ctxid_idx);
1893 
1894 static ssize_t ctxid_pid_show(struct device *dev,
1895 			      struct device_attribute *attr,
1896 			      char *buf)
1897 {
1898 	u8 idx;
1899 	unsigned long val;
1900 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1901 	struct etmv4_config *config = &drvdata->config;
1902 
1903 	/*
1904 	 * Don't use contextID tracing if coming from a PID namespace.  See
1905 	 * comment in ctxid_pid_store().
1906 	 */
1907 	if (task_active_pid_ns(current) != &init_pid_ns)
1908 		return -EINVAL;
1909 
1910 	raw_spin_lock(&drvdata->spinlock);
1911 	idx = config->ctxid_idx;
1912 	val = (unsigned long)config->ctxid_pid[idx];
1913 	raw_spin_unlock(&drvdata->spinlock);
1914 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1915 }
1916 
1917 static ssize_t ctxid_pid_store(struct device *dev,
1918 			       struct device_attribute *attr,
1919 			       const char *buf, size_t size)
1920 {
1921 	u8 idx;
1922 	unsigned long pid;
1923 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1924 	struct etmv4_config *config = &drvdata->config;
1925 
1926 	/*
1927 	 * When contextID tracing is enabled the tracers will insert the
1928 	 * value found in the contextID register in the trace stream.  But if
1929 	 * a process is in a namespace the PID of that process as seen from the
1930 	 * namespace won't be what the kernel sees, something that makes the
1931 	 * feature confusing and can potentially leak kernel-only information.
1932 	 * As such, refuse to use the feature if @current is not in the initial
1933 	 * PID namespace.
1934 	 */
1935 	if (task_active_pid_ns(current) != &init_pid_ns)
1936 		return -EINVAL;
1937 
1938 	/*
1939 	 * Only implemented when ctxid tracing is supported, i.e. at least one
1940 	 * ctxid comparator is implemented and the ctxid size is greater than
1941 	 * 0 bits.
1942 	 */
1943 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1944 		return -EINVAL;
1945 	if (kstrtoul(buf, 16, &pid))
1946 		return -EINVAL;
1947 
1948 	raw_spin_lock(&drvdata->spinlock);
1949 	idx = config->ctxid_idx;
1950 	config->ctxid_pid[idx] = (u64)pid;
1951 	raw_spin_unlock(&drvdata->spinlock);
1952 	return size;
1953 }
1954 static DEVICE_ATTR_RW(ctxid_pid);
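/*
 * Illustrative context ID tracing setup (device name is platform dependent;
 * the writes only succeed from the initial PID namespace and when at least
 * one ctxid comparator is implemented):
 *
 *   echo 0x0 > /sys/bus/coresight/devices/<etm-device>/ctxid_idx
 *   echo <pid-in-hex> > /sys/bus/coresight/devices/<etm-device>/ctxid_pid
 */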
1955 
1956 static ssize_t ctxid_masks_show(struct device *dev,
1957 				struct device_attribute *attr,
1958 				char *buf)
1959 {
1960 	unsigned long val1, val2;
1961 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1962 	struct etmv4_config *config = &drvdata->config;
1963 
1964 	/*
1965 	 * Don't use contextID tracing if coming from a PID namespace.  See
1966 	 * comment in ctxid_pid_store().
1967 	 */
1968 	if (task_active_pid_ns(current) != &init_pid_ns)
1969 		return -EINVAL;
1970 
1971 	raw_spin_lock(&drvdata->spinlock);
1972 	val1 = config->ctxid_mask0;
1973 	val2 = config->ctxid_mask1;
1974 	raw_spin_unlock(&drvdata->spinlock);
1975 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1976 }
1977 
1978 static ssize_t ctxid_masks_store(struct device *dev,
1979 				struct device_attribute *attr,
1980 				const char *buf, size_t size)
1981 {
1982 	u8 i, j, maskbyte;
1983 	unsigned long val1, val2, mask;
1984 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1985 	struct etmv4_config *config = &drvdata->config;
1986 	int nr_inputs;
1987 
1988 	/*
1989 	 * Don't use contextID tracing if coming from a PID namespace.  See
1990 	 * comment in ctxid_pid_store().
1991 	 */
1992 	if (task_active_pid_ns(current) != &init_pid_ns)
1993 		return -EINVAL;
1994 
1995 	/*
1996 	 * Only implemented when ctxid tracing is supported, i.e. at least one
1997 	 * ctxid comparator is implemented and the ctxid size is greater than
1998 	 * 0 bits.
1999 	 */
2000 	if (!drvdata->ctxid_size || !drvdata->numcidc)
2001 		return -EINVAL;
2002 	/* one mask if <= 4 comparators, two for up to 8 */
2003 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2004 	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2005 		return -EINVAL;
2006 
2007 	raw_spin_lock(&drvdata->spinlock);
2008 	/*
2009 	 * each byte[0..3] controls mask value applied to ctxid
2010 	 * comparator[0..3]
2011 	 */
2012 	switch (drvdata->numcidc) {
2013 	case 0x1:
2014 		/* COMP0, bits[7:0] */
2015 		config->ctxid_mask0 = val1 & 0xFF;
2016 		break;
2017 	case 0x2:
2018 		/* COMP1, bits[15:8] */
2019 		config->ctxid_mask0 = val1 & 0xFFFF;
2020 		break;
2021 	case 0x3:
2022 		/* COMP2, bits[23:16] */
2023 		config->ctxid_mask0 = val1 & 0xFFFFFF;
2024 		break;
2025 	case 0x4:
2026 		/* COMP3, bits[31:24] */
2027 		config->ctxid_mask0 = val1;
2028 		break;
2029 	case 0x5:
2030 		/* COMP4, bits[7:0] */
2031 		config->ctxid_mask0 = val1;
2032 		config->ctxid_mask1 = val2 & 0xFF;
2033 		break;
2034 	case 0x6:
2035 		/* COMP5, bits[15:8] */
2036 		config->ctxid_mask0 = val1;
2037 		config->ctxid_mask1 = val2 & 0xFFFF;
2038 		break;
2039 	case 0x7:
2040 		/* COMP6, bits[23:16] */
2041 		config->ctxid_mask0 = val1;
2042 		config->ctxid_mask1 = val2 & 0xFFFFFF;
2043 		break;
2044 	case 0x8:
2045 		/* COMP7, bits[31:24] */
2046 		config->ctxid_mask0 = val1;
2047 		config->ctxid_mask1 = val2;
2048 		break;
2049 	default:
2050 		break;
2051 	}
2052 	/*
2053 	 * If software sets a mask bit to 1, it must program the relevant byte
2054 	 * of the ctxid comparator value to 0x0, otherwise behavior is
2055 	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
2056 	 * clear bits[31:24] (i.e. byte 3) of the ctxid comparator0 value register.
2057 	 */
2058 	mask = config->ctxid_mask0;
2059 	for (i = 0; i < drvdata->numcidc; i++) {
2060 		/* mask value of corresponding ctxid comparator */
2061 		maskbyte = mask & ETMv4_EVENT_MASK;
2062 		/*
2063 		 * each bit corresponds to a byte of respective ctxid comparator
2064 		 * value register
2065 		 */
2066 		for (j = 0; j < 8; j++) {
2067 			if (maskbyte & 1)
2068 				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2069 			maskbyte >>= 1;
2070 		}
2071 		/* Select the next ctxid comparator mask value */
2072 		if (i == 3)
2073 			/* ctxid comparators[4-7] */
2074 			mask = config->ctxid_mask1;
2075 		else
2076 			mask >>= 0x8;
2077 	}
2078 
2079 	raw_spin_unlock(&drvdata->spinlock);
2080 	return size;
2081 }
2082 static DEVICE_ATTR_RW(ctxid_masks);
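/*
 * ctxid_masks accepts one or two hex words, as parsed above: a single word
 * covers comparators 0-3 (one byte per comparator) and a second word is
 * required when more than four comparators are implemented.  For example,
 * masking every byte of comparator 0 only (illustrative path as above):
 *
 *   echo 0xFF > /sys/bus/coresight/devices/<etm-device>/ctxid_masks
 */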
2083 
2084 static ssize_t vmid_idx_show(struct device *dev,
2085 			     struct device_attribute *attr,
2086 			     char *buf)
2087 {
2088 	unsigned long val;
2089 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2090 	struct etmv4_config *config = &drvdata->config;
2091 
2092 	val = config->vmid_idx;
2093 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2094 }
2095 
2096 static ssize_t vmid_idx_store(struct device *dev,
2097 			      struct device_attribute *attr,
2098 			      const char *buf, size_t size)
2099 {
2100 	unsigned long val;
2101 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2102 	struct etmv4_config *config = &drvdata->config;
2103 
2104 	if (kstrtoul(buf, 16, &val))
2105 		return -EINVAL;
2106 	if (val >= drvdata->numvmidc)
2107 		return -EINVAL;
2108 
2109 	/*
2110 	 * Use spinlock to ensure index doesn't change while it gets
2111 	 * dereferenced multiple times within a spinlock block elsewhere.
2112 	 */
2113 	raw_spin_lock(&drvdata->spinlock);
2114 	config->vmid_idx = val;
2115 	raw_spin_unlock(&drvdata->spinlock);
2116 	return size;
2117 }
2118 static DEVICE_ATTR_RW(vmid_idx);
2119 
2120 static ssize_t vmid_val_show(struct device *dev,
2121 			     struct device_attribute *attr,
2122 			     char *buf)
2123 {
2124 	unsigned long val;
2125 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2126 	struct etmv4_config *config = &drvdata->config;
2127 
2128 	/*
2129 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2130 	 * See comment in ctxid_pid_store().
2131 	 */
2132 	if (!task_is_in_init_pid_ns(current))
2133 		return -EINVAL;
2134 
2135 	raw_spin_lock(&drvdata->spinlock);
2136 	val = (unsigned long)config->vmid_val[config->vmid_idx];
2137 	raw_spin_unlock(&drvdata->spinlock);
2138 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2139 }
2140 
2141 static ssize_t vmid_val_store(struct device *dev,
2142 			      struct device_attribute *attr,
2143 			      const char *buf, size_t size)
2144 {
2145 	unsigned long val;
2146 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2147 	struct etmv4_config *config = &drvdata->config;
2148 
2149 	/*
2150 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2151 	 * See comment in ctxid_pid_store().
2152 	 */
2153 	if (!task_is_in_init_pid_ns(current))
2154 		return -EINVAL;
2155 
2156 	/*
2157 	 * Only implemented when vmid tracing is supported, i.e. at least one
2158 	 * vmid comparator is implemented and the vmid size is at least 8 bits.
2159 	 */
2160 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2161 		return -EINVAL;
2162 	if (kstrtoul(buf, 16, &val))
2163 		return -EINVAL;
2164 
2165 	raw_spin_lock(&drvdata->spinlock);
2166 	config->vmid_val[config->vmid_idx] = (u64)val;
2167 	raw_spin_unlock(&drvdata->spinlock);
2168 	return size;
2169 }
2170 static DEVICE_ATTR_RW(vmid_val);
2171 
2172 static ssize_t vmid_masks_show(struct device *dev,
2173 			       struct device_attribute *attr, char *buf)
2174 {
2175 	unsigned long val1, val2;
2176 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2177 	struct etmv4_config *config = &drvdata->config;
2178 
2179 	/*
2180 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2181 	 * See comment in ctxid_pid_store().
2182 	 */
2183 	if (!task_is_in_init_pid_ns(current))
2184 		return -EINVAL;
2185 
2186 	raw_spin_lock(&drvdata->spinlock);
2187 	val1 = config->vmid_mask0;
2188 	val2 = config->vmid_mask1;
2189 	raw_spin_unlock(&drvdata->spinlock);
2190 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2191 }
2192 
2193 static ssize_t vmid_masks_store(struct device *dev,
2194 				struct device_attribute *attr,
2195 				const char *buf, size_t size)
2196 {
2197 	u8 i, j, maskbyte;
2198 	unsigned long val1, val2, mask;
2199 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2200 	struct etmv4_config *config = &drvdata->config;
2201 	int nr_inputs;
2202 
2203 	/*
2204 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2205 	 * See comment in ctxid_pid_store().
2206 	 */
2207 	if (!task_is_in_init_pid_ns(current))
2208 		return -EINVAL;
2209 
2210 	/*
2211 	 * Only implemented when vmid tracing is supported, i.e. at least one
2212 	 * vmid comparator is implemented and the vmid size is at least 8 bits.
2213 	 */
2214 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2215 		return -EINVAL;
2216 	/* one mask if <= 4 comparators, two for up to 8 */
2217 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2218 	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2219 		return -EINVAL;
2220 
2221 	raw_spin_lock(&drvdata->spinlock);
2222 
2223 	/*
2224 	 * each byte[0..3] controls mask value applied to vmid
2225 	 * comparator[0..3]
2226 	 */
2227 	switch (drvdata->numvmidc) {
2228 	case 0x1:
2229 		/* COMP0, bits[7:0] */
2230 		config->vmid_mask0 = val1 & 0xFF;
2231 		break;
2232 	case 0x2:
2233 		/* COMP1, bits[15:8] */
2234 		config->vmid_mask0 = val1 & 0xFFFF;
2235 		break;
2236 	case 0x3:
2237 		/* COMP2, bits[23:16] */
2238 		config->vmid_mask0 = val1 & 0xFFFFFF;
2239 		break;
2240 	case 0x4:
2241 		/* COMP3, bits[31:24] */
2242 		config->vmid_mask0 = val1;
2243 		break;
2244 	case 0x5:
2245 		/* COMP4, bits[7:0] */
2246 		config->vmid_mask0 = val1;
2247 		config->vmid_mask1 = val2 & 0xFF;
2248 		break;
2249 	case 0x6:
2250 		/* COMP5, bits[15:8] */
2251 		config->vmid_mask0 = val1;
2252 		config->vmid_mask1 = val2 & 0xFFFF;
2253 		break;
2254 	case 0x7:
2255 		/* COMP6, bits[23:16] */
2256 		config->vmid_mask0 = val1;
2257 		config->vmid_mask1 = val2 & 0xFFFFFF;
2258 		break;
2259 	case 0x8:
2260 		/* COMP7, bits[31:24] */
2261 		config->vmid_mask0 = val1;
2262 		config->vmid_mask1 = val2;
2263 		break;
2264 	default:
2265 		break;
2266 	}
2267 
2268 	/*
2269 	 * If software sets a mask bit to 1, it must program the relevant byte
2270 	 * of the vmid comparator value to 0x0, otherwise behavior is
2271 	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
2272 	 * clear bits[31:24] (i.e. byte 3) of the vmid comparator0 value register.
2273 	 */
2274 	mask = config->vmid_mask0;
2275 	for (i = 0; i < drvdata->numvmidc; i++) {
2276 		/* mask value of corresponding vmid comparator */
2277 		maskbyte = mask & ETMv4_EVENT_MASK;
2278 		/*
2279 		 * each bit corresponds to a byte of respective vmid comparator
2280 		 * value register
2281 		 */
2282 		for (j = 0; j < 8; j++) {
2283 			if (maskbyte & 1)
2284 				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2285 			maskbyte >>= 1;
2286 		}
2287 		/* Select the next vmid comparator mask value */
2288 		if (i == 3)
2289 			/* vmid comparators[4-7] */
2290 			mask = config->vmid_mask1;
2291 		else
2292 			mask >>= 0x8;
2293 	}
2294 	raw_spin_unlock(&drvdata->spinlock);
2295 	return size;
2296 }
2297 static DEVICE_ATTR_RW(vmid_masks);
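/*
 * The vmid_* attributes mirror the ctxid_* attributes above, selecting and
 * programming the virtual context ID comparators instead, and are subject
 * to the same initial-PID-namespace restriction.
 */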
2298 
2299 static ssize_t cpu_show(struct device *dev,
2300 			struct device_attribute *attr, char *buf)
2301 {
2302 	int val;
2303 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2304 
2305 	val = drvdata->cpu;
2306 	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2307 
2308 }
2309 static DEVICE_ATTR_RO(cpu);
2310 
2311 static ssize_t ts_source_show(struct device *dev,
2312 			      struct device_attribute *attr,
2313 			      char *buf)
2314 {
2315 	int val;
2316 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2317 
2318 	if (!drvdata->trfcr) {
2319 		val = -1;
2320 		goto out;
2321 	}
2322 
2323 	val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
2324 	switch (val) {
2325 	case TRFCR_EL1_TS_VIRTUAL:
2326 	case TRFCR_EL1_TS_GUEST_PHYSICAL:
2327 	case TRFCR_EL1_TS_PHYSICAL:
2328 		break;
2329 	default:
2330 		val = -1;
2331 		break;
2332 	}
2333 
2334 out:
2335 	return sysfs_emit(buf, "%d\n", val);
2336 }
2337 static DEVICE_ATTR_RO(ts_source);
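/*
 * ts_source reports the raw TRFCR.TS timestamp source encoding in use for
 * this CPU (virtual, guest physical or physical), or -1 when the trace
 * filter register is not used or its TS field holds an unexpected value.
 */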
2338 
2339 static struct attribute *coresight_etmv4_attrs[] = {
2340 	&dev_attr_nr_pe_cmp.attr,
2341 	&dev_attr_nr_addr_cmp.attr,
2342 	&dev_attr_nr_cntr.attr,
2343 	&dev_attr_nr_ext_inp.attr,
2344 	&dev_attr_numcidc.attr,
2345 	&dev_attr_numvmidc.attr,
2346 	&dev_attr_nrseqstate.attr,
2347 	&dev_attr_nr_resource.attr,
2348 	&dev_attr_nr_ss_cmp.attr,
2349 	&dev_attr_reset.attr,
2350 	&dev_attr_mode.attr,
2351 	&dev_attr_pe.attr,
2352 	&dev_attr_event.attr,
2353 	&dev_attr_event_instren.attr,
2354 	&dev_attr_event_ts.attr,
2355 	&dev_attr_syncfreq.attr,
2356 	&dev_attr_cyc_threshold.attr,
2357 	&dev_attr_bb_ctrl.attr,
2358 	&dev_attr_event_vinst.attr,
2359 	&dev_attr_s_exlevel_vinst.attr,
2360 	&dev_attr_ns_exlevel_vinst.attr,
2361 	&dev_attr_addr_idx.attr,
2362 	&dev_attr_addr_instdatatype.attr,
2363 	&dev_attr_addr_single.attr,
2364 	&dev_attr_addr_range.attr,
2365 	&dev_attr_addr_start.attr,
2366 	&dev_attr_addr_stop.attr,
2367 	&dev_attr_addr_ctxtype.attr,
2368 	&dev_attr_addr_context.attr,
2369 	&dev_attr_addr_exlevel_s_ns.attr,
2370 	&dev_attr_addr_cmp_view.attr,
2371 	&dev_attr_vinst_pe_cmp_start_stop.attr,
2372 	&dev_attr_sshot_idx.attr,
2373 	&dev_attr_sshot_ctrl.attr,
2374 	&dev_attr_sshot_pe_ctrl.attr,
2375 	&dev_attr_sshot_status.attr,
2376 	&dev_attr_seq_idx.attr,
2377 	&dev_attr_seq_state.attr,
2378 	&dev_attr_seq_event.attr,
2379 	&dev_attr_seq_reset_event.attr,
2380 	&dev_attr_cntr_idx.attr,
2381 	&dev_attr_cntrldvr.attr,
2382 	&dev_attr_cntr_val.attr,
2383 	&dev_attr_cntr_ctrl.attr,
2384 	&dev_attr_res_idx.attr,
2385 	&dev_attr_res_ctrl.attr,
2386 	&dev_attr_ctxid_idx.attr,
2387 	&dev_attr_ctxid_pid.attr,
2388 	&dev_attr_ctxid_masks.attr,
2389 	&dev_attr_vmid_idx.attr,
2390 	&dev_attr_vmid_val.attr,
2391 	&dev_attr_vmid_masks.attr,
2392 	&dev_attr_cpu.attr,
2393 	&dev_attr_ts_source.attr,
2394 	NULL,
2395 };
2396 
2397 /*
2398  * The trace ID is allocated dynamically on enable, but also allocate it on
2399  * read in case sysfs or perf reads before enable, to ensure consistent
2400  * metadata information for trace decode.
2401  */
2402 static ssize_t trctraceid_show(struct device *dev,
2403 			       struct device_attribute *attr,
2404 			       char *buf)
2405 {
2406 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2407 	int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL);
2408 
2409 	if (trace_id < 0)
2410 		return trace_id;
2411 
2412 	return sysfs_emit(buf, "0x%x\n", trace_id);
2413 }
2414 
2415 struct etmv4_reg {
2416 	struct coresight_device *csdev;
2417 	u32 offset;
2418 	u32 data;
2419 };
2420 
2421 static void do_smp_cross_read(void *data)
2422 {
2423 	struct etmv4_reg *reg = data;
2424 
2425 	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
2426 }
2427 
2428 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2429 {
2430 	struct etmv4_reg reg;
2431 
2432 	reg.offset = offset;
2433 	reg.csdev = drvdata->csdev;
2434 
2435 	/*
2436 	 * smp cross call ensures the CPU will be powered up before
2437 	 * accessing the ETMv4 trace core registers
2438 	 */
2439 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2440 	return reg.data;
2441 }
2442 
2443 static u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2444 {
2445 	struct dev_ext_attribute *eattr;
2446 
2447 	eattr = container_of(attr, struct dev_ext_attribute, attr);
2448 	return (u32)(unsigned long)eattr->var;
2449 }
2450 
2451 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2452 					struct device_attribute *d_attr,
2453 					char *buf)
2454 {
2455 	u32 val, offset;
2456 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2457 
2458 	offset = coresight_etm4x_attr_to_offset(d_attr);
2459 
2460 	pm_runtime_get_sync(dev->parent);
2461 	val = etmv4_cross_read(drvdata, offset);
2462 	pm_runtime_put_sync(dev->parent);
2463 
2464 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2465 }
2466 
2467 static bool
2468 etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2469 {
2470 	switch (offset) {
2471 	ETM_COMMON_SYSREG_LIST_CASES
2472 		/*
2473 		 * Registers common to ETE & ETM4x that are accessible via
2474 		 * system instructions are always implemented.
2475 		 */
2476 		return true;
2477 
2478 	ETM4x_ONLY_SYSREG_LIST_CASES
2479 		/*
2480 		 * We only support etm4x and ete. So if the device is not
2481 		 * ETE, it must be ETMv4x.
2482 		 */
2483 		return !etm4x_is_ete(drvdata);
2484 
2485 	ETM4x_MMAP_LIST_CASES
2486 		/*
2487 		 * Registers accessible only via the memory-mapped interface
2488 		 * must not be accessed via system instructions.
2489 		 * We cannot access the drvdata->csdev here, as this
2490 		 * function is called during the device creation, via
2491 		 * coresight_register() and the csdev is not initialized
2492 		 * until that is done. So rely on the drvdata->base to
2493 		 * detect if we have a memory mapped access.
2494 		 * Also ETE doesn't implement memory mapped access, thus
2495 		 * it is sufficient to check that we are using mmio.
2496 		 */
2497 		return !!drvdata->base;
2498 
2499 	ETE_ONLY_SYSREG_LIST_CASES
2500 		return etm4x_is_ete(drvdata);
2501 	}
2502 
2503 	return false;
2504 }
2505 
2506 /*
2507  * Hide the ETM4x registers that may not be available on the
2508  * hardware.
2509  * There are certain management registers unavailable via system
2510  * instructions. Make those sysfs attributes hidden on such
2511  * systems.
2512  */
2513 static umode_t
2514 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2515 				     struct attribute *attr, int unused)
2516 {
2517 	struct device *dev = kobj_to_dev(kobj);
2518 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2519 	struct device_attribute *d_attr;
2520 	u32 offset;
2521 
2522 	d_attr = container_of(attr, struct device_attribute, attr);
2523 	offset = coresight_etm4x_attr_to_offset(d_attr);
2524 
2525 	if (etm4x_register_implemented(drvdata, offset))
2526 		return attr->mode;
2527 	return 0;
2528 }
2529 
2530 /*
2531  * Macro to define an RO ext attribute with an offset and a show function.
2532  * The offset is used by the mgmt group to ensure that only the registers
2533  * valid for the ETM / ETE variant are visible.
2534  */
2535 #define coresight_etm4x_reg_showfn(name, offset, showfn) (	\
2536 	&((struct dev_ext_attribute[]) {			\
2537 	   {							\
2538 		__ATTR(name, 0444, showfn, NULL),		\
2539 		(void *)(unsigned long)offset			\
2540 	   }							\
2541 	})[0].attr.attr						\
2542 	)
2543 
2544 /* macro using the default coresight_etm4x_reg_show function */
2545 #define coresight_etm4x_reg(name, offset)	\
2546 	coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
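/*
 * For reference, coresight_etm4x_reg(trcpdcr, TRCPDCR) evaluates to the
 * ->attr.attr member of an anonymous dev_ext_attribute whose ->var holds
 * the register offset: a read-only sysfs file named "trcpdcr" backed by
 * coresight_etm4x_reg_show() and filtered by the group's is_visible hook.
 */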
2547 
2548 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2549 	coresight_etm4x_reg(trcpdcr, TRCPDCR),
2550 	coresight_etm4x_reg(trcpdsr, TRCPDSR),
2551 	coresight_etm4x_reg(trclsr, TRCLSR),
2552 	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2553 	coresight_etm4x_reg(trcdevid, TRCDEVID),
2554 	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2555 	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2556 	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2557 	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2558 	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2559 	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2560 	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2561 	coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
2562 	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2563 	NULL,
2564 };
2565 
2566 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2567 	coresight_etm4x_reg(trcidr0, TRCIDR0),
2568 	coresight_etm4x_reg(trcidr1, TRCIDR1),
2569 	coresight_etm4x_reg(trcidr2, TRCIDR2),
2570 	coresight_etm4x_reg(trcidr3, TRCIDR3),
2571 	coresight_etm4x_reg(trcidr4, TRCIDR4),
2572 	coresight_etm4x_reg(trcidr5, TRCIDR5),
2573 	/* trcidr[6,7] are reserved */
2574 	coresight_etm4x_reg(trcidr8, TRCIDR8),
2575 	coresight_etm4x_reg(trcidr9, TRCIDR9),
2576 	coresight_etm4x_reg(trcidr10, TRCIDR10),
2577 	coresight_etm4x_reg(trcidr11, TRCIDR11),
2578 	coresight_etm4x_reg(trcidr12, TRCIDR12),
2579 	coresight_etm4x_reg(trcidr13, TRCIDR13),
2580 	NULL,
2581 };
2582 
2583 static const struct attribute_group coresight_etmv4_group = {
2584 	.attrs = coresight_etmv4_attrs,
2585 };
2586 
2587 static const struct attribute_group coresight_etmv4_mgmt_group = {
2588 	.is_visible = coresight_etm4x_attr_reg_implemented,
2589 	.attrs = coresight_etmv4_mgmt_attrs,
2590 	.name = "mgmt",
2591 };
2592 
2593 static const struct attribute_group coresight_etmv4_trcidr_group = {
2594 	.attrs = coresight_etmv4_trcidr_attrs,
2595 	.name = "trcidr",
2596 };
2597 
2598 const struct attribute_group *coresight_etmv4_groups[] = {
2599 	&coresight_etmv4_group,
2600 	&coresight_etmv4_mgmt_group,
2601 	&coresight_etmv4_trcidr_group,
2602 	NULL,
2603 };
2604