xref: /linux/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
13 
14 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
15 {
16 	u8 idx;
17 	struct etmv4_config *config = &drvdata->config;
18 
19 	idx = config->addr_idx;
20 
21 	/*
22 	 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 	 * the trace unit performs
24 	 */
25 	if (FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]) == TRCACATRn_TYPE_ADDR) {
26 		if (idx % 2 != 0)
27 			return -EINVAL;
28 
29 		/*
30 		 * We are performing instruction address comparison. Set the
31 		 * relevant bit of ViewInst Include/Exclude Control register
32 		 * for corresponding address comparator pair.
33 		 */
34 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
35 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
36 			return -EINVAL;
37 
38 		if (exclude == true) {
39 			/*
40 			 * Set exclude bit and unset the include bit
41 			 * corresponding to comparator pair
42 			 */
43 			config->viiectlr |= BIT(idx / 2 + 16);
44 			config->viiectlr &= ~BIT(idx / 2);
45 		} else {
46 			/*
47 			 * Set include bit and unset exclude bit
48 			 * corresponding to comparator pair
49 			 */
50 			config->viiectlr |= BIT(idx / 2);
51 			config->viiectlr &= ~BIT(idx / 2 + 16);
52 		}
53 	}
54 	return 0;
55 }
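/*
 * Worked example of the arithmetic above, assuming address comparator
 * pair 2/3 is selected (idx == 2): exclude sets BIT(2/2 + 16) == BIT(17)
 * and clears BIT(1); include does the reverse. In other words, the
 * include bit for comparator pair n is BIT(n) and the matching exclude
 * bit is BIT(n + 16) in the ViewInst include/exclude control register.
 */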
56 
57 static ssize_t nr_pe_cmp_show(struct device *dev,
58 			      struct device_attribute *attr,
59 			      char *buf)
60 {
61 	unsigned long val;
62 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 
64 	val = drvdata->nr_pe_cmp;
65 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 }
67 static DEVICE_ATTR_RO(nr_pe_cmp);
68 
69 static ssize_t nr_addr_cmp_show(struct device *dev,
70 				struct device_attribute *attr,
71 				char *buf)
72 {
73 	unsigned long val;
74 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 
76 	val = drvdata->nr_addr_cmp;
77 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 }
79 static DEVICE_ATTR_RO(nr_addr_cmp);
80 
81 static ssize_t nr_cntr_show(struct device *dev,
82 			    struct device_attribute *attr,
83 			    char *buf)
84 {
85 	unsigned long val;
86 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 
88 	val = drvdata->nr_cntr;
89 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 }
91 static DEVICE_ATTR_RO(nr_cntr);
92 
93 static ssize_t nr_ext_inp_show(struct device *dev,
94 			       struct device_attribute *attr,
95 			       char *buf)
96 {
97 	unsigned long val;
98 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 
100 	val = drvdata->nr_ext_inp;
101 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 }
103 static DEVICE_ATTR_RO(nr_ext_inp);
104 
105 static ssize_t numcidc_show(struct device *dev,
106 			    struct device_attribute *attr,
107 			    char *buf)
108 {
109 	unsigned long val;
110 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 
112 	val = drvdata->numcidc;
113 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 }
115 static DEVICE_ATTR_RO(numcidc);
116 
117 static ssize_t numvmidc_show(struct device *dev,
118 			     struct device_attribute *attr,
119 			     char *buf)
120 {
121 	unsigned long val;
122 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 
124 	val = drvdata->numvmidc;
125 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 }
127 static DEVICE_ATTR_RO(numvmidc);
128 
129 static ssize_t nrseqstate_show(struct device *dev,
130 			       struct device_attribute *attr,
131 			       char *buf)
132 {
133 	unsigned long val;
134 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 
136 	val = drvdata->nrseqstate;
137 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 }
139 static DEVICE_ATTR_RO(nrseqstate);
140 
141 static ssize_t nr_resource_show(struct device *dev,
142 				struct device_attribute *attr,
143 				char *buf)
144 {
145 	unsigned long val;
146 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 
148 	val = drvdata->nr_resource;
149 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 }
151 static DEVICE_ATTR_RO(nr_resource);
152 
153 static ssize_t nr_ss_cmp_show(struct device *dev,
154 			      struct device_attribute *attr,
155 			      char *buf)
156 {
157 	unsigned long val;
158 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 
160 	val = drvdata->nr_ss_cmp;
161 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 }
163 static DEVICE_ATTR_RO(nr_ss_cmp);
164 
165 static ssize_t reset_store(struct device *dev,
166 			   struct device_attribute *attr,
167 			   const char *buf, size_t size)
168 {
169 	int i;
170 	unsigned long val;
171 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
172 	struct etmv4_config *config = &drvdata->config;
173 
174 	if (kstrtoul(buf, 16, &val))
175 		return -EINVAL;
176 
177 	spin_lock(&drvdata->spinlock);
178 	if (val)
179 		config->mode = 0x0;
180 
181 	/* Disable data tracing: do not trace load and store data transfers */
182 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
183 	config->cfg &= ~(TRCCONFIGR_INSTP0_LOAD | TRCCONFIGR_INSTP0_STORE);
184 
185 	/* Disable data value and data address tracing */
186 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
187 			   ETM_MODE_DATA_TRACE_VAL);
188 	config->cfg &= ~(TRCCONFIGR_DA | TRCCONFIGR_DV);
189 
190 	/* Disable all events tracing */
191 	config->eventctrl0 = 0x0;
192 	config->eventctrl1 = 0x0;
193 
194 	/* Disable timestamp event */
195 	config->ts_ctrl = 0x0;
196 
197 	/* Disable stalling */
198 	config->stall_ctrl = 0x0;
199 
200 	/* Reset trace synchronization period to 2^8 = 256 bytes */
201 	if (drvdata->syncpr == false)
202 		config->syncfreq = 0x8;
203 
204 	/*
205 	 * Enable ViewInst to trace everything with start-stop logic in
206 	 * started state. ARM recommends start-stop logic is set before
207 	 * each trace run.
208 	 */
209 	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
210 	if (drvdata->nr_addr_cmp > 0) {
211 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
212 		/* SSSTATUS, bit[9] */
213 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
214 	}
215 
216 	/* No address range filtering for ViewInst */
217 	config->viiectlr = 0x0;
218 
219 	/* No start-stop filtering for ViewInst */
220 	config->vissctlr = 0x0;
221 	config->vipcssctlr = 0x0;
222 
223 	/* Disable seq events */
224 	for (i = 0; i < drvdata->nrseqstate-1; i++)
225 		config->seq_ctrl[i] = 0x0;
226 	config->seq_rst = 0x0;
227 	config->seq_state = 0x0;
228 
229 	/* Disable external input events */
230 	config->ext_inp = 0x0;
231 
232 	config->cntr_idx = 0x0;
233 	for (i = 0; i < drvdata->nr_cntr; i++) {
234 		config->cntrldvr[i] = 0x0;
235 		config->cntr_ctrl[i] = 0x0;
236 		config->cntr_val[i] = 0x0;
237 	}
238 
239 	config->res_idx = 0x0;
240 	for (i = 2; i < 2 * drvdata->nr_resource; i++)
241 		config->res_ctrl[i] = 0x0;
242 
243 	config->ss_idx = 0x0;
244 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
245 		config->ss_ctrl[i] = 0x0;
246 		config->ss_pe_cmp[i] = 0x0;
247 	}
248 
249 	config->addr_idx = 0x0;
250 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
251 		config->addr_val[i] = 0x0;
252 		config->addr_acc[i] = 0x0;
253 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
254 	}
255 
256 	config->ctxid_idx = 0x0;
257 	for (i = 0; i < drvdata->numcidc; i++)
258 		config->ctxid_pid[i] = 0x0;
259 
260 	config->ctxid_mask0 = 0x0;
261 	config->ctxid_mask1 = 0x0;
262 
263 	config->vmid_idx = 0x0;
264 	for (i = 0; i < drvdata->numvmidc; i++)
265 		config->vmid_val[i] = 0x0;
266 	config->vmid_mask0 = 0x0;
267 	config->vmid_mask1 = 0x0;
268 
269 	spin_unlock(&drvdata->spinlock);
270 
271 	/* for sysfs - only release trace id when resetting */
272 	etm4_release_trace_id(drvdata);
273 
274 	cscfg_csdev_reset_feats(to_coresight_device(dev));
275 
276 	return size;
277 }
278 static DEVICE_ATTR_WO(reset);
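/*
 * Usage sketch, assuming the trace unit is exposed as etm0 on the
 * coresight bus (the device name may differ per platform):
 *
 *   echo 1 > /sys/bus/coresight/devices/etm0/reset
 *
 * Any hex value is accepted; a non-zero value additionally clears
 * 'mode' before the remaining configuration fields are restored to
 * their defaults.
 */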
279 
280 static ssize_t mode_show(struct device *dev,
281 			 struct device_attribute *attr,
282 			 char *buf)
283 {
284 	unsigned long val;
285 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
286 	struct etmv4_config *config = &drvdata->config;
287 
288 	val = config->mode;
289 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
290 }
291 
292 static ssize_t mode_store(struct device *dev,
293 			  struct device_attribute *attr,
294 			  const char *buf, size_t size)
295 {
296 	unsigned long val, mode;
297 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
298 	struct etmv4_config *config = &drvdata->config;
299 
300 	if (kstrtoul(buf, 16, &val))
301 		return -EINVAL;
302 
303 	spin_lock(&drvdata->spinlock);
304 	config->mode = val & ETMv4_MODE_ALL;
305 
306 	if (drvdata->instrp0 == true) {
307 		/* start by clearing instruction P0 field */
308 		config->cfg  &= ~TRCCONFIGR_INSTP0_LOAD_STORE;
309 		if (config->mode & ETM_MODE_LOAD)
310 			/* 0b01 Trace load instructions as P0 instructions */
311 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD;
312 		if (config->mode & ETM_MODE_STORE)
313 			/* 0b10 Trace store instructions as P0 instructions */
314 			config->cfg  |= TRCCONFIGR_INSTP0_STORE;
315 		if (config->mode & ETM_MODE_LOAD_STORE)
316 			/*
317 			 * 0b11 Trace load and store instructions
318 			 * as P0 instructions
319 			 */
320 			config->cfg  |= TRCCONFIGR_INSTP0_LOAD_STORE;
321 	}
322 
323 	/* bit[3], Branch broadcast mode */
324 	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
325 		config->cfg |= TRCCONFIGR_BB;
326 	else
327 		config->cfg &= ~TRCCONFIGR_BB;
328 
329 	/* bit[4], Cycle counting instruction trace bit */
330 	if ((config->mode & ETMv4_MODE_CYCACC) &&
331 		(drvdata->trccci == true))
332 		config->cfg |= TRCCONFIGR_CCI;
333 	else
334 		config->cfg &= ~TRCCONFIGR_CCI;
335 
336 	/* bit[6], Context ID tracing bit */
337 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
338 		config->cfg |= TRCCONFIGR_CID;
339 	else
340 		config->cfg &= ~TRCCONFIGR_CID;
341 
342 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
343 		config->cfg |= TRCCONFIGR_VMID;
344 	else
345 		config->cfg &= ~TRCCONFIGR_VMID;
346 
347 	/* bits[10:8], Conditional instruction tracing bit */
348 	mode = ETM_MODE_COND(config->mode);
349 	if (drvdata->trccond == true) {
350 		config->cfg &= ~TRCCONFIGR_COND_MASK;
351 		config->cfg |= mode << __bf_shf(TRCCONFIGR_COND_MASK);
352 	}
353 
354 	/* bit[11], Global timestamp tracing bit */
355 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
356 		config->cfg |= TRCCONFIGR_TS;
357 	else
358 		config->cfg &= ~TRCCONFIGR_TS;
359 
360 	/* bit[12], Return stack enable bit */
361 	if ((config->mode & ETM_MODE_RETURNSTACK) &&
362 					(drvdata->retstack == true))
363 		config->cfg |= TRCCONFIGR_RS;
364 	else
365 		config->cfg &= ~TRCCONFIGR_RS;
366 
367 	/* bits[14:13], Q element enable field */
368 	mode = ETM_MODE_QELEM(config->mode);
369 	/* start by clearing QE bits */
370 	config->cfg &= ~(TRCCONFIGR_QE_W_COUNTS | TRCCONFIGR_QE_WO_COUNTS);
371 	/*
372 	 * if supported, Q elements with instruction counts are enabled.
373 	 * Always set the low bit for any requested mode. Valid combos are
374 	 * 0b00, 0b01 and 0b11.
375 	 */
376 	if (mode && drvdata->q_support)
377 		config->cfg |= TRCCONFIGR_QE_W_COUNTS;
378 	/*
379 	 * if supported, Q elements with and without instruction
380 	 * counts are enabled
381 	 */
382 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
383 		config->cfg |= TRCCONFIGR_QE_WO_COUNTS;
384 
385 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
386 	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
387 	    (drvdata->atbtrig == true))
388 		config->eventctrl1 |= TRCEVENTCTL1R_ATB;
389 	else
390 		config->eventctrl1 &= ~TRCEVENTCTL1R_ATB;
391 
392 	/* bit[12], Low-power state behavior override bit */
393 	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
394 	    (drvdata->lpoverride == true))
395 		config->eventctrl1 |= TRCEVENTCTL1R_LPOVERRIDE;
396 	else
397 		config->eventctrl1 &= ~TRCEVENTCTL1R_LPOVERRIDE;
398 
399 	/* bit[8], Instruction stall bit */
400 	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
401 		config->stall_ctrl |= TRCSTALLCTLR_ISTALL;
402 	else
403 		config->stall_ctrl &= ~TRCSTALLCTLR_ISTALL;
404 
405 	/* bit[10], Prioritize instruction trace bit */
406 	if (config->mode & ETM_MODE_INSTPRIO)
407 		config->stall_ctrl |= TRCSTALLCTLR_INSTPRIORITY;
408 	else
409 		config->stall_ctrl &= ~TRCSTALLCTLR_INSTPRIORITY;
410 
411 	/* bit[13], Trace overflow prevention bit */
412 	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
413 		(drvdata->nooverflow == true))
414 		config->stall_ctrl |= TRCSTALLCTLR_NOOVERFLOW;
415 	else
416 		config->stall_ctrl &= ~TRCSTALLCTLR_NOOVERFLOW;
417 
418 	/* bit[9] Start/stop logic control bit */
419 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
420 		config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
421 	else
422 		config->vinst_ctrl &= ~TRCVICTLR_SSSTATUS;
423 
424 	/* bit[10], Whether a trace unit must trace a Reset exception */
425 	if (config->mode & ETM_MODE_TRACE_RESET)
426 		config->vinst_ctrl |= TRCVICTLR_TRCRESET;
427 	else
428 		config->vinst_ctrl &= ~TRCVICTLR_TRCRESET;
429 
430 	/* bit[11], Whether a trace unit must trace a system error exception */
431 	if ((config->mode & ETM_MODE_TRACE_ERR) &&
432 		(drvdata->trc_error == true))
433 		config->vinst_ctrl |= TRCVICTLR_TRCERR;
434 	else
435 		config->vinst_ctrl &= ~TRCVICTLR_TRCERR;
436 
437 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
438 		etm4_config_trace_mode(config);
439 
440 	spin_unlock(&drvdata->spinlock);
441 
442 	return size;
443 }
444 static DEVICE_ATTR_RW(mode);
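/*
 * Usage sketch: 'mode' takes a hex bitmask built from the ETM_MODE_* and
 * ETMv4_MODE_* flags declared in the driver headers (masked here with
 * ETMv4_MODE_ALL), e.g. with an assumed device node:
 *
 *   echo 0x10 > /sys/bus/coresight/devices/etm0/mode   (illustrative value)
 *
 * Which feature a given bit selects comes from those definitions, so
 * scripts should derive the mask from the headers rather than hard-code
 * magic numbers.
 */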
445 
446 static ssize_t pe_show(struct device *dev,
447 		       struct device_attribute *attr,
448 		       char *buf)
449 {
450 	unsigned long val;
451 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
452 	struct etmv4_config *config = &drvdata->config;
453 
454 	val = config->pe_sel;
455 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
456 }
457 
458 static ssize_t pe_store(struct device *dev,
459 			struct device_attribute *attr,
460 			const char *buf, size_t size)
461 {
462 	unsigned long val;
463 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
464 	struct etmv4_config *config = &drvdata->config;
465 
466 	if (kstrtoul(buf, 16, &val))
467 		return -EINVAL;
468 
469 	spin_lock(&drvdata->spinlock);
470 	if (val > drvdata->nr_pe) {
471 		spin_unlock(&drvdata->spinlock);
472 		return -EINVAL;
473 	}
474 
475 	config->pe_sel = val;
476 	spin_unlock(&drvdata->spinlock);
477 	return size;
478 }
479 static DEVICE_ATTR_RW(pe);
480 
481 static ssize_t event_show(struct device *dev,
482 			  struct device_attribute *attr,
483 			  char *buf)
484 {
485 	unsigned long val;
486 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
487 	struct etmv4_config *config = &drvdata->config;
488 
489 	val = config->eventctrl0;
490 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
491 }
492 
493 static ssize_t event_store(struct device *dev,
494 			   struct device_attribute *attr,
495 			   const char *buf, size_t size)
496 {
497 	unsigned long val;
498 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
499 	struct etmv4_config *config = &drvdata->config;
500 
501 	if (kstrtoul(buf, 16, &val))
502 		return -EINVAL;
503 
504 	spin_lock(&drvdata->spinlock);
505 	switch (drvdata->nr_event) {
506 	case 0x0:
507 		/* EVENT0, bits[7:0] */
508 		config->eventctrl0 = val & 0xFF;
509 		break;
510 	case 0x1:
511 		 /* EVENT1, bits[15:8] */
512 		config->eventctrl0 = val & 0xFFFF;
513 		break;
514 	case 0x2:
515 		/* EVENT2, bits[23:16] */
516 		config->eventctrl0 = val & 0xFFFFFF;
517 		break;
518 	case 0x3:
519 		/* EVENT3, bits[31:24] */
520 		config->eventctrl0 = val;
521 		break;
522 	default:
523 		break;
524 	}
525 	spin_unlock(&drvdata->spinlock);
526 	return size;
527 }
528 static DEVICE_ATTR_RW(event);
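/*
 * Worked example of the masking above: with drvdata->nr_event == 0x1 a
 * write of 0x12345678 keeps only bits[15:0], i.e. EVENT0 (bits[7:0]) and
 * EVENT1 (bits[15:8]), so eventctrl0 ends up as 0x5678.
 */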
529 
530 static ssize_t event_instren_show(struct device *dev,
531 				  struct device_attribute *attr,
532 				  char *buf)
533 {
534 	unsigned long val;
535 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
536 	struct etmv4_config *config = &drvdata->config;
537 
538 	val = FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK, config->eventctrl1);
539 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
540 }
541 
542 static ssize_t event_instren_store(struct device *dev,
543 				   struct device_attribute *attr,
544 				   const char *buf, size_t size)
545 {
546 	unsigned long val;
547 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
548 	struct etmv4_config *config = &drvdata->config;
549 
550 	if (kstrtoul(buf, 16, &val))
551 		return -EINVAL;
552 
553 	spin_lock(&drvdata->spinlock);
554 	/* start by clearing all instruction event enable bits */
555 	config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
556 	switch (drvdata->nr_event) {
557 	case 0x0:
558 		/* generate Event element for event 1 */
559 		config->eventctrl1 |= val & TRCEVENTCTL1R_INSTEN_1;
560 		break;
561 	case 0x1:
562 		/* generate Event element for event 1 and 2 */
563 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 | TRCEVENTCTL1R_INSTEN_1);
564 		break;
565 	case 0x2:
566 		/* generate Event element for event 1, 2 and 3 */
567 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
568 					     TRCEVENTCTL1R_INSTEN_1 |
569 					     TRCEVENTCTL1R_INSTEN_2);
570 		break;
571 	case 0x3:
572 		/* generate Event element for all 4 events */
573 		config->eventctrl1 |= val & (TRCEVENTCTL1R_INSTEN_0 |
574 					     TRCEVENTCTL1R_INSTEN_1 |
575 					     TRCEVENTCTL1R_INSTEN_2 |
576 					     TRCEVENTCTL1R_INSTEN_3);
577 		break;
578 	default:
579 		break;
580 	}
581 	spin_unlock(&drvdata->spinlock);
582 	return size;
583 }
584 static DEVICE_ATTR_RW(event_instren);
585 
586 static ssize_t event_ts_show(struct device *dev,
587 			     struct device_attribute *attr,
588 			     char *buf)
589 {
590 	unsigned long val;
591 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
592 	struct etmv4_config *config = &drvdata->config;
593 
594 	val = config->ts_ctrl;
595 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
596 }
597 
598 static ssize_t event_ts_store(struct device *dev,
599 			      struct device_attribute *attr,
600 			      const char *buf, size_t size)
601 {
602 	unsigned long val;
603 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
604 	struct etmv4_config *config = &drvdata->config;
605 
606 	if (kstrtoul(buf, 16, &val))
607 		return -EINVAL;
608 	if (!drvdata->ts_size)
609 		return -EINVAL;
610 
611 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
612 	return size;
613 }
614 static DEVICE_ATTR_RW(event_ts);
615 
616 static ssize_t syncfreq_show(struct device *dev,
617 			     struct device_attribute *attr,
618 			     char *buf)
619 {
620 	unsigned long val;
621 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
622 	struct etmv4_config *config = &drvdata->config;
623 
624 	val = config->syncfreq;
625 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
626 }
627 
628 static ssize_t syncfreq_store(struct device *dev,
629 			      struct device_attribute *attr,
630 			      const char *buf, size_t size)
631 {
632 	unsigned long val;
633 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
634 	struct etmv4_config *config = &drvdata->config;
635 
636 	if (kstrtoul(buf, 16, &val))
637 		return -EINVAL;
638 	if (drvdata->syncpr == true)
639 		return -EINVAL;
640 
641 	config->syncfreq = val & ETMv4_SYNC_MASK;
642 	return size;
643 }
644 static DEVICE_ATTR_RW(syncfreq);
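/*
 * Note on the value written: it acts as the exponent of the trace
 * synchronisation period, so the 0x8 default programmed in reset_store()
 * requests a sync packet roughly every 2^8 = 256 bytes of trace. Writes
 * fail with -EINVAL when the period is fixed in hardware
 * (drvdata->syncpr).
 */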
645 
646 static ssize_t cyc_threshold_show(struct device *dev,
647 				  struct device_attribute *attr,
648 				  char *buf)
649 {
650 	unsigned long val;
651 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
652 	struct etmv4_config *config = &drvdata->config;
653 
654 	val = config->ccctlr;
655 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
656 }
657 
658 static ssize_t cyc_threshold_store(struct device *dev,
659 				   struct device_attribute *attr,
660 				   const char *buf, size_t size)
661 {
662 	unsigned long val;
663 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
664 	struct etmv4_config *config = &drvdata->config;
665 
666 	if (kstrtoul(buf, 16, &val))
667 		return -EINVAL;
668 
669 	/* mask off max threshold before checking min value */
670 	val &= ETM_CYC_THRESHOLD_MASK;
671 	if (val < drvdata->ccitmin)
672 		return -EINVAL;
673 
674 	config->ccctlr = val;
675 	return size;
676 }
677 static DEVICE_ATTR_RW(cyc_threshold);
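/*
 * Worked example: the value is masked with ETM_CYC_THRESHOLD_MASK first
 * and then compared against the minimum threshold advertised by the
 * device, so writing 0x4 on a part whose ccitmin is 0x10 returns -EINVAL
 * instead of silently clamping the value.
 */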
678 
679 static ssize_t bb_ctrl_show(struct device *dev,
680 			    struct device_attribute *attr,
681 			    char *buf)
682 {
683 	unsigned long val;
684 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
685 	struct etmv4_config *config = &drvdata->config;
686 
687 	val = config->bb_ctrl;
688 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
689 }
690 
691 static ssize_t bb_ctrl_store(struct device *dev,
692 			     struct device_attribute *attr,
693 			     const char *buf, size_t size)
694 {
695 	unsigned long val;
696 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
697 	struct etmv4_config *config = &drvdata->config;
698 
699 	if (kstrtoul(buf, 16, &val))
700 		return -EINVAL;
701 	if (drvdata->trcbb == false)
702 		return -EINVAL;
703 	if (!drvdata->nr_addr_cmp)
704 		return -EINVAL;
705 
706 	/*
707 	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
708 	 * individual range comparators. If include then at least 1
709 	 * range must be selected.
710 	 */
711 	if ((val & TRCBBCTLR_MODE) && (FIELD_GET(TRCBBCTLR_RANGE_MASK, val) == 0))
712 		return -EINVAL;
713 
714 	config->bb_ctrl = val & (TRCBBCTLR_MODE | TRCBBCTLR_RANGE_MASK);
715 	return size;
716 }
717 static DEVICE_ATTR_RW(bb_ctrl);
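/*
 * Usage sketch, following the checks above (device path assumed):
 *
 *   echo 0x101 > /sys/bus/coresight/devices/etm0/bb_ctrl
 *
 * sets bit[8] (include mode) and bit[0], i.e. branch broadcasting limited
 * to whatever is programmed in address range comparator pair 0.
 */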
718 
719 static ssize_t event_vinst_show(struct device *dev,
720 				struct device_attribute *attr,
721 				char *buf)
722 {
723 	unsigned long val;
724 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
725 	struct etmv4_config *config = &drvdata->config;
726 
727 	val = FIELD_GET(TRCVICTLR_EVENT_MASK, config->vinst_ctrl);
728 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
729 }
730 
731 static ssize_t event_vinst_store(struct device *dev,
732 				 struct device_attribute *attr,
733 				 const char *buf, size_t size)
734 {
735 	unsigned long val;
736 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
737 	struct etmv4_config *config = &drvdata->config;
738 
739 	if (kstrtoul(buf, 16, &val))
740 		return -EINVAL;
741 
742 	spin_lock(&drvdata->spinlock);
743 	val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
744 	config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
745 	config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
746 	spin_unlock(&drvdata->spinlock);
747 	return size;
748 }
749 static DEVICE_ATTR_RW(event_vinst);
750 
751 static ssize_t s_exlevel_vinst_show(struct device *dev,
752 				    struct device_attribute *attr,
753 				    char *buf)
754 {
755 	unsigned long val;
756 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
757 	struct etmv4_config *config = &drvdata->config;
758 
759 	val = FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK, config->vinst_ctrl);
760 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
761 }
762 
763 static ssize_t s_exlevel_vinst_store(struct device *dev,
764 				     struct device_attribute *attr,
765 				     const char *buf, size_t size)
766 {
767 	unsigned long val;
768 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
769 	struct etmv4_config *config = &drvdata->config;
770 
771 	if (kstrtoul(buf, 16, &val))
772 		return -EINVAL;
773 
774 	spin_lock(&drvdata->spinlock);
775 	/* clear all EXLEVEL_S bits  */
776 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
777 	/* enable instruction tracing for corresponding exception level */
778 	val &= drvdata->s_ex_level;
779 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
780 	spin_unlock(&drvdata->spinlock);
781 	return size;
782 }
783 static DEVICE_ATTR_RW(s_exlevel_vinst);
784 
785 static ssize_t ns_exlevel_vinst_show(struct device *dev,
786 				     struct device_attribute *attr,
787 				     char *buf)
788 {
789 	unsigned long val;
790 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
791 	struct etmv4_config *config = &drvdata->config;
792 
793 	/* EXLEVEL_NS, bits[23:20] */
794 	val = FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK, config->vinst_ctrl);
795 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
796 }
797 
798 static ssize_t ns_exlevel_vinst_store(struct device *dev,
799 				      struct device_attribute *attr,
800 				      const char *buf, size_t size)
801 {
802 	unsigned long val;
803 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
804 	struct etmv4_config *config = &drvdata->config;
805 
806 	if (kstrtoul(buf, 16, &val))
807 		return -EINVAL;
808 
809 	spin_lock(&drvdata->spinlock);
810 	/* clear EXLEVEL_NS bits  */
811 	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
812 	/* enable instruction tracing for corresponding exception level */
813 	val &= drvdata->ns_ex_level;
814 	config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
815 	spin_unlock(&drvdata->spinlock);
816 	return size;
817 }
818 static DEVICE_ATTR_RW(ns_exlevel_vinst);
819 
820 static ssize_t addr_idx_show(struct device *dev,
821 			     struct device_attribute *attr,
822 			     char *buf)
823 {
824 	unsigned long val;
825 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
826 	struct etmv4_config *config = &drvdata->config;
827 
828 	val = config->addr_idx;
829 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
830 }
831 
832 static ssize_t addr_idx_store(struct device *dev,
833 			      struct device_attribute *attr,
834 			      const char *buf, size_t size)
835 {
836 	unsigned long val;
837 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
838 	struct etmv4_config *config = &drvdata->config;
839 
840 	if (kstrtoul(buf, 16, &val))
841 		return -EINVAL;
842 	if (val >= drvdata->nr_addr_cmp * 2)
843 		return -EINVAL;
844 
845 	/*
846 	 * Use spinlock to ensure index doesn't change while it gets
847 	 * dereferenced multiple times within a spinlock block elsewhere.
848 	 */
849 	spin_lock(&drvdata->spinlock);
850 	config->addr_idx = val;
851 	spin_unlock(&drvdata->spinlock);
852 	return size;
853 }
854 static DEVICE_ATTR_RW(addr_idx);
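/*
 * Note: 'addr_idx' selects which single address comparator the other
 * addr_* nodes operate on. Valid values run from 0 to
 * (2 * nr_addr_cmp - 1), since every architected comparator pair exposes
 * two single comparators.
 */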
855 
856 static ssize_t addr_instdatatype_show(struct device *dev,
857 				      struct device_attribute *attr,
858 				      char *buf)
859 {
860 	ssize_t len;
861 	u8 val, idx;
862 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
863 	struct etmv4_config *config = &drvdata->config;
864 
865 	spin_lock(&drvdata->spinlock);
866 	idx = config->addr_idx;
867 	val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
868 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
869 			val == TRCACATRn_TYPE_ADDR ? "instr" :
870 			(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
871 			(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
872 			"data_load_store")));
873 	spin_unlock(&drvdata->spinlock);
874 	return len;
875 }
876 
877 static ssize_t addr_instdatatype_store(struct device *dev,
878 				       struct device_attribute *attr,
879 				       const char *buf, size_t size)
880 {
881 	u8 idx;
882 	char str[20] = "";
883 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
884 	struct etmv4_config *config = &drvdata->config;
885 
886 	if (strlen(buf) >= 20)
887 		return -EINVAL;
888 	if (sscanf(buf, "%s", str) != 1)
889 		return -EINVAL;
890 
891 	spin_lock(&drvdata->spinlock);
892 	idx = config->addr_idx;
893 	if (!strcmp(str, "instr"))
894 		/* TYPE, bits[1:0] */
895 		config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
896 
897 	spin_unlock(&drvdata->spinlock);
898 	return size;
899 }
900 static DEVICE_ATTR_RW(addr_instdatatype);
901 
902 static ssize_t addr_single_show(struct device *dev,
903 				struct device_attribute *attr,
904 				char *buf)
905 {
906 	u8 idx;
907 	unsigned long val;
908 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
909 	struct etmv4_config *config = &drvdata->config;
910 
911 	idx = config->addr_idx;
912 	spin_lock(&drvdata->spinlock);
913 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
914 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
915 		spin_unlock(&drvdata->spinlock);
916 		return -EPERM;
917 	}
918 	val = (unsigned long)config->addr_val[idx];
919 	spin_unlock(&drvdata->spinlock);
920 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
921 }
922 
923 static ssize_t addr_single_store(struct device *dev,
924 				 struct device_attribute *attr,
925 				 const char *buf, size_t size)
926 {
927 	u8 idx;
928 	unsigned long val;
929 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
930 	struct etmv4_config *config = &drvdata->config;
931 
932 	if (kstrtoul(buf, 16, &val))
933 		return -EINVAL;
934 
935 	spin_lock(&drvdata->spinlock);
936 	idx = config->addr_idx;
937 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
938 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
939 		spin_unlock(&drvdata->spinlock);
940 		return -EPERM;
941 	}
942 
943 	config->addr_val[idx] = (u64)val;
944 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
945 	spin_unlock(&drvdata->spinlock);
946 	return size;
947 }
948 static DEVICE_ATTR_RW(addr_single);
949 
950 static ssize_t addr_range_show(struct device *dev,
951 			       struct device_attribute *attr,
952 			       char *buf)
953 {
954 	u8 idx;
955 	unsigned long val1, val2;
956 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
957 	struct etmv4_config *config = &drvdata->config;
958 
959 	spin_lock(&drvdata->spinlock);
960 	idx = config->addr_idx;
961 	if (idx % 2 != 0) {
962 		spin_unlock(&drvdata->spinlock);
963 		return -EPERM;
964 	}
965 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
966 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
967 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
968 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
969 		spin_unlock(&drvdata->spinlock);
970 		return -EPERM;
971 	}
972 
973 	val1 = (unsigned long)config->addr_val[idx];
974 	val2 = (unsigned long)config->addr_val[idx + 1];
975 	spin_unlock(&drvdata->spinlock);
976 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
977 }
978 
979 static ssize_t addr_range_store(struct device *dev,
980 				struct device_attribute *attr,
981 				const char *buf, size_t size)
982 {
983 	u8 idx;
984 	unsigned long val1, val2;
985 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
986 	struct etmv4_config *config = &drvdata->config;
987 	int elements, exclude;
988 
989 	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
990 
991 	/* exclude is optional, but we need at least two parameters */
992 	if (elements < 2)
993 		return -EINVAL;
994 	/* lower address comparator cannot have a higher address value */
995 	if (val1 > val2)
996 		return -EINVAL;
997 
998 	spin_lock(&drvdata->spinlock);
999 	idx = config->addr_idx;
1000 	if (idx % 2 != 0) {
1001 		spin_unlock(&drvdata->spinlock);
1002 		return -EPERM;
1003 	}
1004 
1005 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1006 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1007 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1008 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1009 		spin_unlock(&drvdata->spinlock);
1010 		return -EPERM;
1011 	}
1012 
1013 	config->addr_val[idx] = (u64)val1;
1014 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1015 	config->addr_val[idx + 1] = (u64)val2;
1016 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1017 	/*
1018 	 * Program include or exclude control bits for vinst or vdata
1019 	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE;
1020 	 * use the supplied value, or default to the bit set in 'mode'.
1021 	 */
1022 	if (elements != 3)
1023 		exclude = config->mode & ETM_MODE_EXCLUDE;
1024 	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1025 
1026 	spin_unlock(&drvdata->spinlock);
1027 	return size;
1028 }
1029 static DEVICE_ATTR_RW(addr_range);
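/*
 * Usage sketch matching the sscanf() format above (device path and
 * addresses are illustrative):
 *
 *   echo 0 > /sys/bus/coresight/devices/etm0/addr_idx
 *   echo 0x80081000 0x80082000 0 > /sys/bus/coresight/devices/etm0/addr_range
 *
 * programs comparators 0/1 as an include range; the comparator selected
 * via addr_idx must be the even half of a pair, and leaving out the
 * third field falls back to the ETM_MODE_EXCLUDE bit in 'mode'.
 */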
1030 
1031 static ssize_t addr_start_show(struct device *dev,
1032 			       struct device_attribute *attr,
1033 			       char *buf)
1034 {
1035 	u8 idx;
1036 	unsigned long val;
1037 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1038 	struct etmv4_config *config = &drvdata->config;
1039 
1040 	spin_lock(&drvdata->spinlock);
1041 	idx = config->addr_idx;
1042 
1043 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1044 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1045 		spin_unlock(&drvdata->spinlock);
1046 		return -EPERM;
1047 	}
1048 
1049 	val = (unsigned long)config->addr_val[idx];
1050 	spin_unlock(&drvdata->spinlock);
1051 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1052 }
1053 
1054 static ssize_t addr_start_store(struct device *dev,
1055 				struct device_attribute *attr,
1056 				const char *buf, size_t size)
1057 {
1058 	u8 idx;
1059 	unsigned long val;
1060 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1061 	struct etmv4_config *config = &drvdata->config;
1062 
1063 	if (kstrtoul(buf, 16, &val))
1064 		return -EINVAL;
1065 
1066 	spin_lock(&drvdata->spinlock);
1067 	idx = config->addr_idx;
1068 	if (!drvdata->nr_addr_cmp) {
1069 		spin_unlock(&drvdata->spinlock);
1070 		return -EINVAL;
1071 	}
1072 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1073 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1074 		spin_unlock(&drvdata->spinlock);
1075 		return -EPERM;
1076 	}
1077 
1078 	config->addr_val[idx] = (u64)val;
1079 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1080 	config->vissctlr |= BIT(idx);
1081 	spin_unlock(&drvdata->spinlock);
1082 	return size;
1083 }
1084 static DEVICE_ATTR_RW(addr_start);
1085 
1086 static ssize_t addr_stop_show(struct device *dev,
1087 			      struct device_attribute *attr,
1088 			      char *buf)
1089 {
1090 	u8 idx;
1091 	unsigned long val;
1092 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1093 	struct etmv4_config *config = &drvdata->config;
1094 
1095 	spin_lock(&drvdata->spinlock);
1096 	idx = config->addr_idx;
1097 
1098 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1099 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1100 		spin_unlock(&drvdata->spinlock);
1101 		return -EPERM;
1102 	}
1103 
1104 	val = (unsigned long)config->addr_val[idx];
1105 	spin_unlock(&drvdata->spinlock);
1106 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1107 }
1108 
1109 static ssize_t addr_stop_store(struct device *dev,
1110 			       struct device_attribute *attr,
1111 			       const char *buf, size_t size)
1112 {
1113 	u8 idx;
1114 	unsigned long val;
1115 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1116 	struct etmv4_config *config = &drvdata->config;
1117 
1118 	if (kstrtoul(buf, 16, &val))
1119 		return -EINVAL;
1120 
1121 	spin_lock(&drvdata->spinlock);
1122 	idx = config->addr_idx;
1123 	if (!drvdata->nr_addr_cmp) {
1124 		spin_unlock(&drvdata->spinlock);
1125 		return -EINVAL;
1126 	}
1127 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1128 	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1129 		spin_unlock(&drvdata->spinlock);
1130 		return -EPERM;
1131 	}
1132 
1133 	config->addr_val[idx] = (u64)val;
1134 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1135 	config->vissctlr |= BIT(idx + 16);
1136 	spin_unlock(&drvdata->spinlock);
1137 	return size;
1138 }
1139 static DEVICE_ATTR_RW(addr_stop);
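/*
 * Note: addr_start and addr_stop drive the ViewInst start/stop logic
 * rather than an include/exclude range. The comparator selected through
 * addr_idx is flagged as a start point (vissctlr bit[idx]) or a stop
 * point (bit[idx + 16]), as the BIT() updates above show.
 */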
1140 
1141 static ssize_t addr_ctxtype_show(struct device *dev,
1142 				 struct device_attribute *attr,
1143 				 char *buf)
1144 {
1145 	ssize_t len;
1146 	u8 idx, val;
1147 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148 	struct etmv4_config *config = &drvdata->config;
1149 
1150 	spin_lock(&drvdata->spinlock);
1151 	idx = config->addr_idx;
1152 	/* CONTEXTTYPE, bits[3:2] */
1153 	val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
1154 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1155 			(val == ETM_CTX_CTXID ? "ctxid" :
1156 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1157 	spin_unlock(&drvdata->spinlock);
1158 	return len;
1159 }
1160 
1161 static ssize_t addr_ctxtype_store(struct device *dev,
1162 				  struct device_attribute *attr,
1163 				  const char *buf, size_t size)
1164 {
1165 	u8 idx;
1166 	char str[10] = "";
1167 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1168 	struct etmv4_config *config = &drvdata->config;
1169 
1170 	if (strlen(buf) >= 10)
1171 		return -EINVAL;
1172 	if (sscanf(buf, "%s", str) != 1)
1173 		return -EINVAL;
1174 
1175 	spin_lock(&drvdata->spinlock);
1176 	idx = config->addr_idx;
1177 	if (!strcmp(str, "none"))
1178 		/* start by clearing context type bits */
1179 		config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_MASK;
1180 	else if (!strcmp(str, "ctxid")) {
1181 		/* 0b01 The trace unit performs a Context ID comparison */
1182 		if (drvdata->numcidc) {
1183 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1184 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_VMID;
1185 		}
1186 	} else if (!strcmp(str, "vmid")) {
1187 		/* 0b10 The trace unit performs a VMID comparison */
1188 		if (drvdata->numvmidc) {
1189 			config->addr_acc[idx] &= ~TRCACATRn_CONTEXTTYPE_CTXID;
1190 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1191 		}
1192 	} else if (!strcmp(str, "all")) {
1193 		/*
1194 		 * 0b11 The trace unit performs both a Context ID
1195 		 * comparison and a VMID comparison
1196 		 */
1197 		if (drvdata->numcidc)
1198 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_CTXID;
1199 		if (drvdata->numvmidc)
1200 			config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
1201 	}
1202 	spin_unlock(&drvdata->spinlock);
1203 	return size;
1204 }
1205 static DEVICE_ATTR_RW(addr_ctxtype);
1206 
1207 static ssize_t addr_context_show(struct device *dev,
1208 				 struct device_attribute *attr,
1209 				 char *buf)
1210 {
1211 	u8 idx;
1212 	unsigned long val;
1213 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1214 	struct etmv4_config *config = &drvdata->config;
1215 
1216 	spin_lock(&drvdata->spinlock);
1217 	idx = config->addr_idx;
1218 	/* context ID comparator bits[6:4] */
1219 	val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
1220 	spin_unlock(&drvdata->spinlock);
1221 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1222 }
1223 
1224 static ssize_t addr_context_store(struct device *dev,
1225 				  struct device_attribute *attr,
1226 				  const char *buf, size_t size)
1227 {
1228 	u8 idx;
1229 	unsigned long val;
1230 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1231 	struct etmv4_config *config = &drvdata->config;
1232 
1233 	if (kstrtoul(buf, 16, &val))
1234 		return -EINVAL;
1235 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1236 		return -EINVAL;
1237 	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1238 		     drvdata->numcidc : drvdata->numvmidc))
1239 		return -EINVAL;
1240 
1241 	spin_lock(&drvdata->spinlock);
1242 	idx = config->addr_idx;
1243 	/* clear context ID comparator bits[6:4] */
1244 	config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
1245 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
1246 	spin_unlock(&drvdata->spinlock);
1247 	return size;
1248 }
1249 static DEVICE_ATTR_RW(addr_context);
1250 
1251 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1252 				      struct device_attribute *attr,
1253 				      char *buf)
1254 {
1255 	u8 idx;
1256 	unsigned long val;
1257 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1258 	struct etmv4_config *config = &drvdata->config;
1259 
1260 	spin_lock(&drvdata->spinlock);
1261 	idx = config->addr_idx;
1262 	val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
1263 	spin_unlock(&drvdata->spinlock);
1264 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1265 }
1266 
1267 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1268 				       struct device_attribute *attr,
1269 				       const char *buf, size_t size)
1270 {
1271 	u8 idx;
1272 	unsigned long val;
1273 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1274 	struct etmv4_config *config = &drvdata->config;
1275 
1276 	if (kstrtoul(buf, 0, &val))
1277 		return -EINVAL;
1278 
1279 	if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
1280 		return -EINVAL;
1281 
1282 	spin_lock(&drvdata->spinlock);
1283 	idx = config->addr_idx;
1284 	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1285 	config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
1286 	config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
1287 	spin_unlock(&drvdata->spinlock);
1288 	return size;
1289 }
1290 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1291 
1292 static const char * const addr_type_names[] = {
1293 	"unused",
1294 	"single",
1295 	"range",
1296 	"start",
1297 	"stop"
1298 };
1299 
1300 static ssize_t addr_cmp_view_show(struct device *dev,
1301 				  struct device_attribute *attr, char *buf)
1302 {
1303 	u8 idx, addr_type;
1304 	unsigned long addr_v, addr_v2, addr_ctrl;
1305 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1306 	struct etmv4_config *config = &drvdata->config;
1307 	int size = 0;
1308 	bool exclude = false;
1309 
1310 	spin_lock(&drvdata->spinlock);
1311 	idx = config->addr_idx;
1312 	addr_v = config->addr_val[idx];
1313 	addr_ctrl = config->addr_acc[idx];
1314 	addr_type = config->addr_type[idx];
1315 	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1316 		if (idx & 0x1) {
1317 			idx -= 1;
1318 			addr_v2 = addr_v;
1319 			addr_v = config->addr_val[idx];
1320 		} else {
1321 			addr_v2 = config->addr_val[idx + 1];
1322 		}
1323 		exclude = config->viiectlr & BIT(idx / 2 + 16);
1324 	}
1325 	spin_unlock(&drvdata->spinlock);
1326 	if (addr_type) {
1327 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1328 				 addr_type_names[addr_type], addr_v);
1329 		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1330 			size += scnprintf(buf + size, PAGE_SIZE - size,
1331 					  " %#lx %s", addr_v2,
1332 					  exclude ? "exclude" : "include");
1333 		}
1334 		size += scnprintf(buf + size, PAGE_SIZE - size,
1335 				  " ctrl(%#lx)\n", addr_ctrl);
1336 	} else {
1337 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1338 	}
1339 	return size;
1340 }
1341 static DEVICE_ATTR_RO(addr_cmp_view);
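/*
 * Sample output (values illustrative): for a programmed range this
 * read-only node prints a line such as
 *
 *   addr_cmp[0] range 0x80081000 0x80082000 include ctrl(0)
 *
 * where the trailing ctrl() field is the raw TRCACATRn access-type value
 * for the comparator selected by addr_idx.
 */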
1342 
1343 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1344 					    struct device_attribute *attr,
1345 					    char *buf)
1346 {
1347 	unsigned long val;
1348 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1349 	struct etmv4_config *config = &drvdata->config;
1350 
1351 	if (!drvdata->nr_pe_cmp)
1352 		return -EINVAL;
1353 	val = config->vipcssctlr;
1354 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1355 }
1356 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1357 					     struct device_attribute *attr,
1358 					     const char *buf, size_t size)
1359 {
1360 	unsigned long val;
1361 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1362 	struct etmv4_config *config = &drvdata->config;
1363 
1364 	if (kstrtoul(buf, 16, &val))
1365 		return -EINVAL;
1366 	if (!drvdata->nr_pe_cmp)
1367 		return -EINVAL;
1368 
1369 	spin_lock(&drvdata->spinlock);
1370 	config->vipcssctlr = val;
1371 	spin_unlock(&drvdata->spinlock);
1372 	return size;
1373 }
1374 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1375 
1376 static ssize_t seq_idx_show(struct device *dev,
1377 			    struct device_attribute *attr,
1378 			    char *buf)
1379 {
1380 	unsigned long val;
1381 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382 	struct etmv4_config *config = &drvdata->config;
1383 
1384 	val = config->seq_idx;
1385 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1386 }
1387 
1388 static ssize_t seq_idx_store(struct device *dev,
1389 			     struct device_attribute *attr,
1390 			     const char *buf, size_t size)
1391 {
1392 	unsigned long val;
1393 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1394 	struct etmv4_config *config = &drvdata->config;
1395 
1396 	if (kstrtoul(buf, 16, &val))
1397 		return -EINVAL;
1398 	if (val >= drvdata->nrseqstate - 1)
1399 		return -EINVAL;
1400 
1401 	/*
1402 	 * Use spinlock to ensure index doesn't change while it gets
1403 	 * dereferenced multiple times within a spinlock block elsewhere.
1404 	 */
1405 	spin_lock(&drvdata->spinlock);
1406 	config->seq_idx = val;
1407 	spin_unlock(&drvdata->spinlock);
1408 	return size;
1409 }
1410 static DEVICE_ATTR_RW(seq_idx);
1411 
1412 static ssize_t seq_state_show(struct device *dev,
1413 			      struct device_attribute *attr,
1414 			      char *buf)
1415 {
1416 	unsigned long val;
1417 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1418 	struct etmv4_config *config = &drvdata->config;
1419 
1420 	val = config->seq_state;
1421 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1422 }
1423 
1424 static ssize_t seq_state_store(struct device *dev,
1425 			       struct device_attribute *attr,
1426 			       const char *buf, size_t size)
1427 {
1428 	unsigned long val;
1429 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1430 	struct etmv4_config *config = &drvdata->config;
1431 
1432 	if (kstrtoul(buf, 16, &val))
1433 		return -EINVAL;
1434 	if (val >= drvdata->nrseqstate)
1435 		return -EINVAL;
1436 
1437 	config->seq_state = val;
1438 	return size;
1439 }
1440 static DEVICE_ATTR_RW(seq_state);
1441 
1442 static ssize_t seq_event_show(struct device *dev,
1443 			      struct device_attribute *attr,
1444 			      char *buf)
1445 {
1446 	u8 idx;
1447 	unsigned long val;
1448 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1449 	struct etmv4_config *config = &drvdata->config;
1450 
1451 	spin_lock(&drvdata->spinlock);
1452 	idx = config->seq_idx;
1453 	val = config->seq_ctrl[idx];
1454 	spin_unlock(&drvdata->spinlock);
1455 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1456 }
1457 
1458 static ssize_t seq_event_store(struct device *dev,
1459 			       struct device_attribute *attr,
1460 			       const char *buf, size_t size)
1461 {
1462 	u8 idx;
1463 	unsigned long val;
1464 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1465 	struct etmv4_config *config = &drvdata->config;
1466 
1467 	if (kstrtoul(buf, 16, &val))
1468 		return -EINVAL;
1469 
1470 	spin_lock(&drvdata->spinlock);
1471 	idx = config->seq_idx;
1472 	/* Seq control has two masks B[15:8] F[7:0] */
1473 	config->seq_ctrl[idx] = val & 0xFFFF;
1474 	spin_unlock(&drvdata->spinlock);
1475 	return size;
1476 }
1477 static DEVICE_ATTR_RW(seq_event);
1478 
1479 static ssize_t seq_reset_event_show(struct device *dev,
1480 				    struct device_attribute *attr,
1481 				    char *buf)
1482 {
1483 	unsigned long val;
1484 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1485 	struct etmv4_config *config = &drvdata->config;
1486 
1487 	val = config->seq_rst;
1488 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1489 }
1490 
1491 static ssize_t seq_reset_event_store(struct device *dev,
1492 				     struct device_attribute *attr,
1493 				     const char *buf, size_t size)
1494 {
1495 	unsigned long val;
1496 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1497 	struct etmv4_config *config = &drvdata->config;
1498 
1499 	if (kstrtoul(buf, 16, &val))
1500 		return -EINVAL;
1501 	if (!(drvdata->nrseqstate))
1502 		return -EINVAL;
1503 
1504 	config->seq_rst = val & ETMv4_EVENT_MASK;
1505 	return size;
1506 }
1507 static DEVICE_ATTR_RW(seq_reset_event);
1508 
1509 static ssize_t cntr_idx_show(struct device *dev,
1510 			     struct device_attribute *attr,
1511 			     char *buf)
1512 {
1513 	unsigned long val;
1514 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515 	struct etmv4_config *config = &drvdata->config;
1516 
1517 	val = config->cntr_idx;
1518 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1519 }
1520 
1521 static ssize_t cntr_idx_store(struct device *dev,
1522 			      struct device_attribute *attr,
1523 			      const char *buf, size_t size)
1524 {
1525 	unsigned long val;
1526 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1527 	struct etmv4_config *config = &drvdata->config;
1528 
1529 	if (kstrtoul(buf, 16, &val))
1530 		return -EINVAL;
1531 	if (val >= drvdata->nr_cntr)
1532 		return -EINVAL;
1533 
1534 	/*
1535 	 * Use spinlock to ensure index doesn't change while it gets
1536 	 * dereferenced multiple times within a spinlock block elsewhere.
1537 	 */
1538 	spin_lock(&drvdata->spinlock);
1539 	config->cntr_idx = val;
1540 	spin_unlock(&drvdata->spinlock);
1541 	return size;
1542 }
1543 static DEVICE_ATTR_RW(cntr_idx);
1544 
1545 static ssize_t cntrldvr_show(struct device *dev,
1546 			     struct device_attribute *attr,
1547 			     char *buf)
1548 {
1549 	u8 idx;
1550 	unsigned long val;
1551 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1552 	struct etmv4_config *config = &drvdata->config;
1553 
1554 	spin_lock(&drvdata->spinlock);
1555 	idx = config->cntr_idx;
1556 	val = config->cntrldvr[idx];
1557 	spin_unlock(&drvdata->spinlock);
1558 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1559 }
1560 
1561 static ssize_t cntrldvr_store(struct device *dev,
1562 			      struct device_attribute *attr,
1563 			      const char *buf, size_t size)
1564 {
1565 	u8 idx;
1566 	unsigned long val;
1567 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1568 	struct etmv4_config *config = &drvdata->config;
1569 
1570 	if (kstrtoul(buf, 16, &val))
1571 		return -EINVAL;
1572 	if (val > ETM_CNTR_MAX_VAL)
1573 		return -EINVAL;
1574 
1575 	spin_lock(&drvdata->spinlock);
1576 	idx = config->cntr_idx;
1577 	config->cntrldvr[idx] = val;
1578 	spin_unlock(&drvdata->spinlock);
1579 	return size;
1580 }
1581 static DEVICE_ATTR_RW(cntrldvr);
1582 
1583 static ssize_t cntr_val_show(struct device *dev,
1584 			     struct device_attribute *attr,
1585 			     char *buf)
1586 {
1587 	u8 idx;
1588 	unsigned long val;
1589 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1590 	struct etmv4_config *config = &drvdata->config;
1591 
1592 	spin_lock(&drvdata->spinlock);
1593 	idx = config->cntr_idx;
1594 	val = config->cntr_val[idx];
1595 	spin_unlock(&drvdata->spinlock);
1596 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1597 }
1598 
1599 static ssize_t cntr_val_store(struct device *dev,
1600 			      struct device_attribute *attr,
1601 			      const char *buf, size_t size)
1602 {
1603 	u8 idx;
1604 	unsigned long val;
1605 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1606 	struct etmv4_config *config = &drvdata->config;
1607 
1608 	if (kstrtoul(buf, 16, &val))
1609 		return -EINVAL;
1610 	if (val > ETM_CNTR_MAX_VAL)
1611 		return -EINVAL;
1612 
1613 	spin_lock(&drvdata->spinlock);
1614 	idx = config->cntr_idx;
1615 	config->cntr_val[idx] = val;
1616 	spin_unlock(&drvdata->spinlock);
1617 	return size;
1618 }
1619 static DEVICE_ATTR_RW(cntr_val);
1620 
1621 static ssize_t cntr_ctrl_show(struct device *dev,
1622 			      struct device_attribute *attr,
1623 			      char *buf)
1624 {
1625 	u8 idx;
1626 	unsigned long val;
1627 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1628 	struct etmv4_config *config = &drvdata->config;
1629 
1630 	spin_lock(&drvdata->spinlock);
1631 	idx = config->cntr_idx;
1632 	val = config->cntr_ctrl[idx];
1633 	spin_unlock(&drvdata->spinlock);
1634 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1635 }
1636 
1637 static ssize_t cntr_ctrl_store(struct device *dev,
1638 			       struct device_attribute *attr,
1639 			       const char *buf, size_t size)
1640 {
1641 	u8 idx;
1642 	unsigned long val;
1643 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1644 	struct etmv4_config *config = &drvdata->config;
1645 
1646 	if (kstrtoul(buf, 16, &val))
1647 		return -EINVAL;
1648 
1649 	spin_lock(&drvdata->spinlock);
1650 	idx = config->cntr_idx;
1651 	config->cntr_ctrl[idx] = val;
1652 	spin_unlock(&drvdata->spinlock);
1653 	return size;
1654 }
1655 static DEVICE_ATTR_RW(cntr_ctrl);
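
/*
 * Editorial note (not part of the original driver): the four counter
 * attributes above are typically used together from user space.  A possible
 * sequence, assuming a device named etm0 under /sys/bus/coresight/devices
 * (name, path and values are illustrative only), could be:
 *
 *   echo 0x0  > /sys/bus/coresight/devices/etm0/cntr_idx   # select counter 0
 *   echo 0xff > /sys/bus/coresight/devices/etm0/cntrldvr   # reload value
 *   echo 0xff > /sys/bus/coresight/devices/etm0/cntr_val   # current value
 *   echo 0x1  > /sys/bus/coresight/devices/etm0/cntr_ctrl  # control value
 *
 * All inputs are parsed as hexadecimal; cntrldvr and cntr_val are rejected
 * above ETM_CNTR_MAX_VAL, while cntr_ctrl is stored as written.
 */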
1656 
1657 static ssize_t res_idx_show(struct device *dev,
1658 			    struct device_attribute *attr,
1659 			    char *buf)
1660 {
1661 	unsigned long val;
1662 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1663 	struct etmv4_config *config = &drvdata->config;
1664 
1665 	val = config->res_idx;
1666 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1667 }
1668 
1669 static ssize_t res_idx_store(struct device *dev,
1670 			     struct device_attribute *attr,
1671 			     const char *buf, size_t size)
1672 {
1673 	unsigned long val;
1674 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1675 	struct etmv4_config *config = &drvdata->config;
1676 
1677 	if (kstrtoul(buf, 16, &val))
1678 		return -EINVAL;
1679 	/*
1680 	 * Resource selector pair 0 is always implemented and reserved,
1681 	 * i.e. an idx of 0 or 1 is not valid.
1682 	 */
1683 	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1684 		return -EINVAL;
1685 
1686 	/*
1687 	 * Use spinlock to ensure index doesn't change while it gets
1688 	 * dereferenced multiple times within a spinlock block elsewhere.
1689 	 */
1690 	spin_lock(&drvdata->spinlock);
1691 	config->res_idx = val;
1692 	spin_unlock(&drvdata->spinlock);
1693 	return size;
1694 }
1695 static DEVICE_ATTR_RW(res_idx);
1696 
1697 static ssize_t res_ctrl_show(struct device *dev,
1698 			     struct device_attribute *attr,
1699 			     char *buf)
1700 {
1701 	u8 idx;
1702 	unsigned long val;
1703 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1704 	struct etmv4_config *config = &drvdata->config;
1705 
1706 	spin_lock(&drvdata->spinlock);
1707 	idx = config->res_idx;
1708 	val = config->res_ctrl[idx];
1709 	spin_unlock(&drvdata->spinlock);
1710 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1711 }
1712 
1713 static ssize_t res_ctrl_store(struct device *dev,
1714 			      struct device_attribute *attr,
1715 			      const char *buf, size_t size)
1716 {
1717 	u8 idx;
1718 	unsigned long val;
1719 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1720 	struct etmv4_config *config = &drvdata->config;
1721 
1722 	if (kstrtoul(buf, 16, &val))
1723 		return -EINVAL;
1724 
1725 	spin_lock(&drvdata->spinlock);
1726 	idx = config->res_idx;
1727 	/* For an odd idx the pair inversion bit is RES0 */
1728 	if (idx % 2 != 0)
1729 		/* PAIRINV, bit[21] */
1730 		val &= ~TRCRSCTLRn_PAIRINV;
1731 	config->res_ctrl[idx] = val & (TRCRSCTLRn_PAIRINV |
1732 				       TRCRSCTLRn_INV |
1733 				       TRCRSCTLRn_GROUP_MASK |
1734 				       TRCRSCTLRn_SELECT_MASK);
1735 	spin_unlock(&drvdata->spinlock);
1736 	return size;
1737 }
1738 static DEVICE_ATTR_RW(res_ctrl);
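
/*
 * Editorial note (not part of the original driver): res_idx and res_ctrl are
 * meant to be used as a pair.  An illustrative sequence, assuming a device
 * named etm0 under /sys/bus/coresight/devices, could be:
 *
 *   echo 0x2   > /sys/bus/coresight/devices/etm0/res_idx   # pair 0 reserved
 *   echo <val> > /sys/bus/coresight/devices/etm0/res_ctrl
 *
 * As implemented in res_ctrl_store() above, the written value is truncated
 * to the PAIRINV, INV, GROUP and SELECT fields of TRCRSCTLRn, and PAIRINV is
 * cleared for odd-numbered selectors.
 */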
1739 
1740 static ssize_t sshot_idx_show(struct device *dev,
1741 			      struct device_attribute *attr, char *buf)
1742 {
1743 	unsigned long val;
1744 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1745 	struct etmv4_config *config = &drvdata->config;
1746 
1747 	val = config->ss_idx;
1748 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1749 }
1750 
1751 static ssize_t sshot_idx_store(struct device *dev,
1752 			       struct device_attribute *attr,
1753 			       const char *buf, size_t size)
1754 {
1755 	unsigned long val;
1756 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1757 	struct etmv4_config *config = &drvdata->config;
1758 
1759 	if (kstrtoul(buf, 16, &val))
1760 		return -EINVAL;
1761 	if (val >= drvdata->nr_ss_cmp)
1762 		return -EINVAL;
1763 
1764 	spin_lock(&drvdata->spinlock);
1765 	config->ss_idx = val;
1766 	spin_unlock(&drvdata->spinlock);
1767 	return size;
1768 }
1769 static DEVICE_ATTR_RW(sshot_idx);
1770 
1771 static ssize_t sshot_ctrl_show(struct device *dev,
1772 			       struct device_attribute *attr,
1773 			       char *buf)
1774 {
1775 	unsigned long val;
1776 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1777 	struct etmv4_config *config = &drvdata->config;
1778 
1779 	spin_lock(&drvdata->spinlock);
1780 	val = config->ss_ctrl[config->ss_idx];
1781 	spin_unlock(&drvdata->spinlock);
1782 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1783 }
1784 
1785 static ssize_t sshot_ctrl_store(struct device *dev,
1786 				struct device_attribute *attr,
1787 				const char *buf, size_t size)
1788 {
1789 	u8 idx;
1790 	unsigned long val;
1791 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1792 	struct etmv4_config *config = &drvdata->config;
1793 
1794 	if (kstrtoul(buf, 16, &val))
1795 		return -EINVAL;
1796 
1797 	spin_lock(&drvdata->spinlock);
1798 	idx = config->ss_idx;
1799 	config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
1800 	/* must clear bit 31 in related status register on programming */
1801 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1802 	spin_unlock(&drvdata->spinlock);
1803 	return size;
1804 }
1805 static DEVICE_ATTR_RW(sshot_ctrl);
1806 
1807 static ssize_t sshot_status_show(struct device *dev,
1808 				 struct device_attribute *attr, char *buf)
1809 {
1810 	unsigned long val;
1811 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1812 	struct etmv4_config *config = &drvdata->config;
1813 
1814 	spin_lock(&drvdata->spinlock);
1815 	val = config->ss_status[config->ss_idx];
1816 	spin_unlock(&drvdata->spinlock);
1817 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1818 }
1819 static DEVICE_ATTR_RO(sshot_status);
1820 
1821 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1822 				  struct device_attribute *attr,
1823 				  char *buf)
1824 {
1825 	unsigned long val;
1826 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1827 	struct etmv4_config *config = &drvdata->config;
1828 
1829 	spin_lock(&drvdata->spinlock);
1830 	val = config->ss_pe_cmp[config->ss_idx];
1831 	spin_unlock(&drvdata->spinlock);
1832 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1833 }
1834 
1835 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1836 				   struct device_attribute *attr,
1837 				   const char *buf, size_t size)
1838 {
1839 	u8 idx;
1840 	unsigned long val;
1841 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1842 	struct etmv4_config *config = &drvdata->config;
1843 
1844 	if (kstrtoul(buf, 16, &val))
1845 		return -EINVAL;
1846 
1847 	spin_lock(&drvdata->spinlock);
1848 	idx = config->ss_idx;
1849 	config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
1850 	/* must clear bit 31 in related status register on programming */
1851 	config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
1852 	spin_unlock(&drvdata->spinlock);
1853 	return size;
1854 }
1855 static DEVICE_ATTR_RW(sshot_pe_ctrl);
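
/*
 * Editorial note (not part of the original driver): the single-shot
 * attributes follow the same select-then-program pattern as the counters.
 * sshot_idx picks the comparator control to work on, sshot_ctrl and
 * sshot_pe_ctrl program it, and sshot_status is read-only.  As implemented
 * above, any write to sshot_ctrl or sshot_pe_ctrl also clears the pending
 * status bit (TRCSSCSRn_STATUS) for that index.
 */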
1856 
1857 static ssize_t ctxid_idx_show(struct device *dev,
1858 			      struct device_attribute *attr,
1859 			      char *buf)
1860 {
1861 	unsigned long val;
1862 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1863 	struct etmv4_config *config = &drvdata->config;
1864 
1865 	val = config->ctxid_idx;
1866 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1867 }
1868 
1869 static ssize_t ctxid_idx_store(struct device *dev,
1870 			       struct device_attribute *attr,
1871 			       const char *buf, size_t size)
1872 {
1873 	unsigned long val;
1874 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1875 	struct etmv4_config *config = &drvdata->config;
1876 
1877 	if (kstrtoul(buf, 16, &val))
1878 		return -EINVAL;
1879 	if (val >= drvdata->numcidc)
1880 		return -EINVAL;
1881 
1882 	/*
1883 	 * Use spinlock to ensure index doesn't change while it gets
1884 	 * dereferenced multiple times within a spinlock block elsewhere.
1885 	 */
1886 	spin_lock(&drvdata->spinlock);
1887 	config->ctxid_idx = val;
1888 	spin_unlock(&drvdata->spinlock);
1889 	return size;
1890 }
1891 static DEVICE_ATTR_RW(ctxid_idx);
1892 
1893 static ssize_t ctxid_pid_show(struct device *dev,
1894 			      struct device_attribute *attr,
1895 			      char *buf)
1896 {
1897 	u8 idx;
1898 	unsigned long val;
1899 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1900 	struct etmv4_config *config = &drvdata->config;
1901 
1902 	/*
1903 	 * Don't use contextID tracing if coming from a PID namespace.  See
1904 	 * comment in ctxid_pid_store().
1905 	 */
1906 	if (task_active_pid_ns(current) != &init_pid_ns)
1907 		return -EINVAL;
1908 
1909 	spin_lock(&drvdata->spinlock);
1910 	idx = config->ctxid_idx;
1911 	val = (unsigned long)config->ctxid_pid[idx];
1912 	spin_unlock(&drvdata->spinlock);
1913 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1914 }
1915 
1916 static ssize_t ctxid_pid_store(struct device *dev,
1917 			       struct device_attribute *attr,
1918 			       const char *buf, size_t size)
1919 {
1920 	u8 idx;
1921 	unsigned long pid;
1922 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1923 	struct etmv4_config *config = &drvdata->config;
1924 
1925 	/*
1926 	 * When contextID tracing is enabled the tracers will insert the
1927 	 * value found in the contextID register in the trace stream.  But if
1928 	 * a process is in a namespace the PID of that process as seen from the
1929 	 * namespace won't be what the kernel sees, something that makes the
1930 	 * feature confusing and can potentially leak kernel only information.
1931 	 * As such refuse to use the feature if @current is not in the initial
1932 	 * PID namespace.
1933 	 */
1934 	if (task_active_pid_ns(current) != &init_pid_ns)
1935 		return -EINVAL;
1936 
1937 	/*
1938 	 * Only implemented when ctxid tracing is enabled, i.e. at least one
1939 	 * ctxid comparator is implemented and the context ID size is
1940 	 * greater than zero bits.
1941 	 */
1942 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1943 		return -EINVAL;
1944 	if (kstrtoul(buf, 16, &pid))
1945 		return -EINVAL;
1946 
1947 	spin_lock(&drvdata->spinlock);
1948 	idx = config->ctxid_idx;
1949 	config->ctxid_pid[idx] = (u64)pid;
1950 	spin_unlock(&drvdata->spinlock);
1951 	return size;
1952 }
1953 static DEVICE_ATTR_RW(ctxid_pid);
1954 
1955 static ssize_t ctxid_masks_show(struct device *dev,
1956 				struct device_attribute *attr,
1957 				char *buf)
1958 {
1959 	unsigned long val1, val2;
1960 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1961 	struct etmv4_config *config = &drvdata->config;
1962 
1963 	/*
1964 	 * Don't use contextID tracing if coming from a PID namespace.  See
1965 	 * comment in ctxid_pid_store().
1966 	 */
1967 	if (task_active_pid_ns(current) != &init_pid_ns)
1968 		return -EINVAL;
1969 
1970 	spin_lock(&drvdata->spinlock);
1971 	val1 = config->ctxid_mask0;
1972 	val2 = config->ctxid_mask1;
1973 	spin_unlock(&drvdata->spinlock);
1974 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1975 }
1976 
1977 static ssize_t ctxid_masks_store(struct device *dev,
1978 				struct device_attribute *attr,
1979 				const char *buf, size_t size)
1980 {
1981 	u8 i, j, maskbyte;
1982 	unsigned long val1, val2, mask;
1983 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1984 	struct etmv4_config *config = &drvdata->config;
1985 	int nr_inputs;
1986 
1987 	/*
1988 	 * Don't use contextID tracing if coming from a PID namespace.  See
1989 	 * comment in ctxid_pid_store().
1990 	 */
1991 	if (task_active_pid_ns(current) != &init_pid_ns)
1992 		return -EINVAL;
1993 
1994 	/*
1995 	 * Only implemented when ctxid tracing is enabled, i.e. at least one
1996 	 * ctxid comparator is implemented and the context ID size is
1997 	 * greater than zero bits.
1998 	 */
1999 	if (!drvdata->ctxid_size || !drvdata->numcidc)
2000 		return -EINVAL;
2001 	/* one mask if <= 4 comparators, two for up to 8 */
2002 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2003 	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
2004 		return -EINVAL;
2005 
2006 	spin_lock(&drvdata->spinlock);
2007 	/*
2008 	 * each byte[0..3] controls mask value applied to ctxid
2009 	 * comparator[0..3]
2010 	 */
2011 	switch (drvdata->numcidc) {
2012 	case 0x1:
2013 		/* COMP0, bits[7:0] */
2014 		config->ctxid_mask0 = val1 & 0xFF;
2015 		break;
2016 	case 0x2:
2017 		/* COMP1, bits[15:8] */
2018 		config->ctxid_mask0 = val1 & 0xFFFF;
2019 		break;
2020 	case 0x3:
2021 		/* COMP2, bits[23:16] */
2022 		config->ctxid_mask0 = val1 & 0xFFFFFF;
2023 		break;
2024 	case 0x4:
2025 		/* COMP3, bits[31:24] */
2026 		config->ctxid_mask0 = val1;
2027 		break;
2028 	case 0x5:
2029 		/* COMP4, bits[7:0] */
2030 		config->ctxid_mask0 = val1;
2031 		config->ctxid_mask1 = val2 & 0xFF;
2032 		break;
2033 	case 0x6:
2034 		/* COMP5, bits[15:8] */
2035 		config->ctxid_mask0 = val1;
2036 		config->ctxid_mask1 = val2 & 0xFFFF;
2037 		break;
2038 	case 0x7:
2039 		/* COMP6, bits[23:16] */
2040 		config->ctxid_mask0 = val1;
2041 		config->ctxid_mask1 = val2 & 0xFFFFFF;
2042 		break;
2043 	case 0x8:
2044 		/* COMP7, bits[31:24] */
2045 		config->ctxid_mask0 = val1;
2046 		config->ctxid_mask1 = val2;
2047 		break;
2048 	default:
2049 		break;
2050 	}
2051 	/*
2052 	 * If software sets a mask bit to 1, it must program the relevant byte
2053 	 * of the ctxid comparator value to 0x0, otherwise behavior is
2054 	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
2055 	 * clear bits[31:24] (byte 3) of the ctxid comparator0 value register.
2056 	 */
2057 	mask = config->ctxid_mask0;
2058 	for (i = 0; i < drvdata->numcidc; i++) {
2059 		/* mask value of corresponding ctxid comparator */
2060 		maskbyte = mask & ETMv4_EVENT_MASK;
2061 		/*
2062 		 * each bit corresponds to a byte of respective ctxid comparator
2063 		 * value register
2064 		 */
2065 		for (j = 0; j < 8; j++) {
2066 			if (maskbyte & 1)
2067 				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2068 			maskbyte >>= 1;
2069 		}
2070 		/* Select the next ctxid comparator mask value */
2071 		if (i == 3)
2072 			/* ctxid comparators[4-7] */
2073 			mask = config->ctxid_mask1;
2074 		else
2075 			mask >>= 0x8;
2076 	}
2077 
2078 	spin_unlock(&drvdata->spinlock);
2079 	return size;
2080 }
2081 static DEVICE_ATTR_RW(ctxid_masks);
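
/*
 * Editorial note (not part of the original driver), illustrating the mask
 * layout handled by ctxid_masks_store() above: each comparator owns one byte
 * of a mask register (comparators 0-3 in ctxid_mask0, 4-7 in ctxid_mask1),
 * and each bit of that byte masks one byte of the 64-bit comparator value.
 * For example, with numcidc == 6, writing "0xffffffff 0xffff" masks every
 * byte of comparators 0-5, so the store routine also clears all bytes of
 * ctxid_pid[0..5].
 */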
2082 
2083 static ssize_t vmid_idx_show(struct device *dev,
2084 			     struct device_attribute *attr,
2085 			     char *buf)
2086 {
2087 	unsigned long val;
2088 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2089 	struct etmv4_config *config = &drvdata->config;
2090 
2091 	val = config->vmid_idx;
2092 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2093 }
2094 
2095 static ssize_t vmid_idx_store(struct device *dev,
2096 			      struct device_attribute *attr,
2097 			      const char *buf, size_t size)
2098 {
2099 	unsigned long val;
2100 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2101 	struct etmv4_config *config = &drvdata->config;
2102 
2103 	if (kstrtoul(buf, 16, &val))
2104 		return -EINVAL;
2105 	if (val >= drvdata->numvmidc)
2106 		return -EINVAL;
2107 
2108 	/*
2109 	 * Use spinlock to ensure index doesn't change while it gets
2110 	 * dereferenced multiple times within a spinlock block elsewhere.
2111 	 */
2112 	spin_lock(&drvdata->spinlock);
2113 	config->vmid_idx = val;
2114 	spin_unlock(&drvdata->spinlock);
2115 	return size;
2116 }
2117 static DEVICE_ATTR_RW(vmid_idx);
2118 
2119 static ssize_t vmid_val_show(struct device *dev,
2120 			     struct device_attribute *attr,
2121 			     char *buf)
2122 {
2123 	unsigned long val;
2124 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2125 	struct etmv4_config *config = &drvdata->config;
2126 
2127 	/*
2128 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2129 	 * See comment in ctxid_pid_store().
2130 	 */
2131 	if (!task_is_in_init_pid_ns(current))
2132 		return -EINVAL;
2133 
2134 	spin_lock(&drvdata->spinlock);
2135 	val = (unsigned long)config->vmid_val[config->vmid_idx];
2136 	spin_unlock(&drvdata->spinlock);
2137 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2138 }
2139 
2140 static ssize_t vmid_val_store(struct device *dev,
2141 			      struct device_attribute *attr,
2142 			      const char *buf, size_t size)
2143 {
2144 	unsigned long val;
2145 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2146 	struct etmv4_config *config = &drvdata->config;
2147 
2148 	/*
2149 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2150 	 * See comment in ctxid_pid_store().
2151 	 */
2152 	if (!task_is_in_init_pid_ns(current))
2153 		return -EINVAL;
2154 
2155 	/*
2156 	 * only implemented when vmid tracing is enabled, i.e. at least one
2157 	 * vmid comparator is implemented and at least 8 bit vmid size
2158 	 */
2159 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2160 		return -EINVAL;
2161 	if (kstrtoul(buf, 16, &val))
2162 		return -EINVAL;
2163 
2164 	spin_lock(&drvdata->spinlock);
2165 	config->vmid_val[config->vmid_idx] = (u64)val;
2166 	spin_unlock(&drvdata->spinlock);
2167 	return size;
2168 }
2169 static DEVICE_ATTR_RW(vmid_val);
2170 
2171 static ssize_t vmid_masks_show(struct device *dev,
2172 			       struct device_attribute *attr, char *buf)
2173 {
2174 	unsigned long val1, val2;
2175 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2176 	struct etmv4_config *config = &drvdata->config;
2177 
2178 	/*
2179 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2180 	 * See comment in ctxid_pid_store().
2181 	 */
2182 	if (!task_is_in_init_pid_ns(current))
2183 		return -EINVAL;
2184 
2185 	spin_lock(&drvdata->spinlock);
2186 	val1 = config->vmid_mask0;
2187 	val2 = config->vmid_mask1;
2188 	spin_unlock(&drvdata->spinlock);
2189 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2190 }
2191 
2192 static ssize_t vmid_masks_store(struct device *dev,
2193 				struct device_attribute *attr,
2194 				const char *buf, size_t size)
2195 {
2196 	u8 i, j, maskbyte;
2197 	unsigned long val1, val2, mask;
2198 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2199 	struct etmv4_config *config = &drvdata->config;
2200 	int nr_inputs;
2201 
2202 	/*
2203 	 * Don't use virtual contextID tracing if coming from a PID namespace.
2204 	 * See comment in ctxid_pid_store().
2205 	 */
2206 	if (!task_is_in_init_pid_ns(current))
2207 		return -EINVAL;
2208 
2209 	/*
2210 	 * only implemented when vmid tracing is enabled, i.e. at least one
2211 	 * vmid comparator is implemented and at least 8 bit vmid size
2212 	 */
2213 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2214 		return -EINVAL;
2215 	/* one mask if <= 4 comparators, two for up to 8 */
2216 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2217 	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2218 		return -EINVAL;
2219 
2220 	spin_lock(&drvdata->spinlock);
2221 
2222 	/*
2223 	 * each byte[0..3] controls mask value applied to vmid
2224 	 * comparator[0..3]
2225 	 */
2226 	switch (drvdata->numvmidc) {
2227 	case 0x1:
2228 		/* COMP0, bits[7:0] */
2229 		config->vmid_mask0 = val1 & 0xFF;
2230 		break;
2231 	case 0x2:
2232 		/* COMP1, bits[15:8] */
2233 		config->vmid_mask0 = val1 & 0xFFFF;
2234 		break;
2235 	case 0x3:
2236 		/* COMP2, bits[23:16] */
2237 		config->vmid_mask0 = val1 & 0xFFFFFF;
2238 		break;
2239 	case 0x4:
2240 		/* COMP3, bits[31:24] */
2241 		config->vmid_mask0 = val1;
2242 		break;
2243 	case 0x5:
2244 		/* COMP4, bits[7:0] */
2245 		config->vmid_mask0 = val1;
2246 		config->vmid_mask1 = val2 & 0xFF;
2247 		break;
2248 	case 0x6:
2249 		/* COMP5, bits[15:8] */
2250 		config->vmid_mask0 = val1;
2251 		config->vmid_mask1 = val2 & 0xFFFF;
2252 		break;
2253 	case 0x7:
2254 		/* COMP6, bits[23:16] */
2255 		config->vmid_mask0 = val1;
2256 		config->vmid_mask1 = val2 & 0xFFFFFF;
2257 		break;
2258 	case 0x8:
2259 		/* COMP7, bits[31:24] */
2260 		config->vmid_mask0 = val1;
2261 		config->vmid_mask1 = val2;
2262 		break;
2263 	default:
2264 		break;
2265 	}
2266 
2267 	/*
2268 	 * If software sets a mask bit to 1, it must program the relevant byte
2269 	 * of the vmid comparator value to 0x0, otherwise behavior is
2270 	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
2271 	 * clear bits[31:24] (byte 3) of the vmid comparator0 value register.
2272 	 */
2273 	mask = config->vmid_mask0;
2274 	for (i = 0; i < drvdata->numvmidc; i++) {
2275 		/* mask value of corresponding vmid comparator */
2276 		maskbyte = mask & ETMv4_EVENT_MASK;
2277 		/*
2278 		 * each bit corresponds to a byte of respective vmid comparator
2279 		 * value register
2280 		 */
2281 		for (j = 0; j < 8; j++) {
2282 			if (maskbyte & 1)
2283 				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2284 			maskbyte >>= 1;
2285 		}
2286 		/* Select the next vmid comparator mask value */
2287 		if (i == 3)
2288 			/* vmid comparators[4-7] */
2289 			mask = config->vmid_mask1;
2290 		else
2291 			mask >>= 0x8;
2292 	}
2293 	spin_unlock(&drvdata->spinlock);
2294 	return size;
2295 }
2296 static DEVICE_ATTR_RW(vmid_masks);
2297 
2298 static ssize_t cpu_show(struct device *dev,
2299 			struct device_attribute *attr, char *buf)
2300 {
2301 	int val;
2302 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2303 
2304 	val = drvdata->cpu;
2305 	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2306 
2307 }
2308 static DEVICE_ATTR_RO(cpu);
2309 
2310 static ssize_t ts_source_show(struct device *dev,
2311 			      struct device_attribute *attr,
2312 			      char *buf)
2313 {
2314 	int val;
2315 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2316 
2317 	if (!drvdata->trfcr) {
2318 		val = -1;
2319 		goto out;
2320 	}
2321 
2322 	switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
2323 	case TRFCR_ELx_TS_VIRTUAL:
2324 	case TRFCR_ELx_TS_GUEST_PHYSICAL:
2325 	case TRFCR_ELx_TS_PHYSICAL:
2326 		val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
2327 		break;
2328 	default:
2329 		val = -1;
2330 		break;
2331 	}
2332 
2333 out:
2334 	return sysfs_emit(buf, "%d\n", val);
2335 }
2336 static DEVICE_ATTR_RO(ts_source);
2337 
2338 static struct attribute *coresight_etmv4_attrs[] = {
2339 	&dev_attr_nr_pe_cmp.attr,
2340 	&dev_attr_nr_addr_cmp.attr,
2341 	&dev_attr_nr_cntr.attr,
2342 	&dev_attr_nr_ext_inp.attr,
2343 	&dev_attr_numcidc.attr,
2344 	&dev_attr_numvmidc.attr,
2345 	&dev_attr_nrseqstate.attr,
2346 	&dev_attr_nr_resource.attr,
2347 	&dev_attr_nr_ss_cmp.attr,
2348 	&dev_attr_reset.attr,
2349 	&dev_attr_mode.attr,
2350 	&dev_attr_pe.attr,
2351 	&dev_attr_event.attr,
2352 	&dev_attr_event_instren.attr,
2353 	&dev_attr_event_ts.attr,
2354 	&dev_attr_syncfreq.attr,
2355 	&dev_attr_cyc_threshold.attr,
2356 	&dev_attr_bb_ctrl.attr,
2357 	&dev_attr_event_vinst.attr,
2358 	&dev_attr_s_exlevel_vinst.attr,
2359 	&dev_attr_ns_exlevel_vinst.attr,
2360 	&dev_attr_addr_idx.attr,
2361 	&dev_attr_addr_instdatatype.attr,
2362 	&dev_attr_addr_single.attr,
2363 	&dev_attr_addr_range.attr,
2364 	&dev_attr_addr_start.attr,
2365 	&dev_attr_addr_stop.attr,
2366 	&dev_attr_addr_ctxtype.attr,
2367 	&dev_attr_addr_context.attr,
2368 	&dev_attr_addr_exlevel_s_ns.attr,
2369 	&dev_attr_addr_cmp_view.attr,
2370 	&dev_attr_vinst_pe_cmp_start_stop.attr,
2371 	&dev_attr_sshot_idx.attr,
2372 	&dev_attr_sshot_ctrl.attr,
2373 	&dev_attr_sshot_pe_ctrl.attr,
2374 	&dev_attr_sshot_status.attr,
2375 	&dev_attr_seq_idx.attr,
2376 	&dev_attr_seq_state.attr,
2377 	&dev_attr_seq_event.attr,
2378 	&dev_attr_seq_reset_event.attr,
2379 	&dev_attr_cntr_idx.attr,
2380 	&dev_attr_cntrldvr.attr,
2381 	&dev_attr_cntr_val.attr,
2382 	&dev_attr_cntr_ctrl.attr,
2383 	&dev_attr_res_idx.attr,
2384 	&dev_attr_res_ctrl.attr,
2385 	&dev_attr_ctxid_idx.attr,
2386 	&dev_attr_ctxid_pid.attr,
2387 	&dev_attr_ctxid_masks.attr,
2388 	&dev_attr_vmid_idx.attr,
2389 	&dev_attr_vmid_val.attr,
2390 	&dev_attr_vmid_masks.attr,
2391 	&dev_attr_cpu.attr,
2392 	&dev_attr_ts_source.attr,
2393 	NULL,
2394 };
2395 
2396 /*
2397  * The trace ID is allocated dynamically on enable, but is also allocated
2398  * on read in case sysfs or perf reads it before enable, to ensure
2399  * consistent metadata information for trace decode.
2400  */
2401 static ssize_t trctraceid_show(struct device *dev,
2402 			       struct device_attribute *attr,
2403 			       char *buf)
2404 {
2405 	int trace_id;
2406 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2407 
2408 	trace_id = etm4_read_alloc_trace_id(drvdata);
2409 	if (trace_id < 0)
2410 		return trace_id;
2411 
2412 	return sysfs_emit(buf, "0x%x\n", trace_id);
2413 }
2414 
2415 struct etmv4_reg {
2416 	struct coresight_device *csdev;
2417 	u32 offset;
2418 	u32 data;
2419 };
2420 
2421 static void do_smp_cross_read(void *data)
2422 {
2423 	struct etmv4_reg *reg = data;
2424 
2425 	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
2426 }
2427 
2428 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2429 {
2430 	struct etmv4_reg reg;
2431 
2432 	reg.offset = offset;
2433 	reg.csdev = drvdata->csdev;
2434 
2435 	/*
2436 	 * smp cross call ensures the CPU will be powered up before
2437 	 * accessing the ETMv4 trace core registers
2438 	 */
2439 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2440 	return reg.data;
2441 }
2442 
2443 static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2444 {
2445 	struct dev_ext_attribute *eattr;
2446 
2447 	eattr = container_of(attr, struct dev_ext_attribute, attr);
2448 	return (u32)(unsigned long)eattr->var;
2449 }
2450 
2451 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2452 					struct device_attribute *d_attr,
2453 					char *buf)
2454 {
2455 	u32 val, offset;
2456 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2457 
2458 	offset = coresight_etm4x_attr_to_offset(d_attr);
2459 
2460 	pm_runtime_get_sync(dev->parent);
2461 	val = etmv4_cross_read(drvdata, offset);
2462 	pm_runtime_put_sync(dev->parent);
2463 
2464 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2465 }
2466 
2467 static inline bool
2468 etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2469 {
2470 	switch (offset) {
2471 	ETM_COMMON_SYSREG_LIST_CASES
2472 		/*
2473 		 * Common registers to ETE & ETM4x accessible via system
2474 		 * instructions are always implemented.
2475 		 */
2476 		return true;
2477 
2478 	ETM4x_ONLY_SYSREG_LIST_CASES
2479 		/*
2480 		 * We only support etm4x and ete. So if the device is not
2481 		 * ETE, it must be ETMv4x.
2482 		 */
2483 		return !etm4x_is_ete(drvdata);
2484 
2485 	ETM4x_MMAP_LIST_CASES
2486 		/*
2487 		 * Registers accessible only via memory-mapped registers
2488 		 * must not be accessed via system instructions.
2489 		 * We cannot access the drvdata->csdev here, as this
2490 		 * function is called during the device creation, via
2491 		 * coresight_register() and the csdev is not initialized
2492 		 * until that is done. So rely on the drvdata->base to
2493 		 * detect if we have a memory mapped access.
2494 		 * Also ETE doesn't implement memory mapped access, thus
2495 		 * it is sufficient to check that we are using mmio.
2496 		 */
2497 		return !!drvdata->base;
2498 
2499 	ETE_ONLY_SYSREG_LIST_CASES
2500 		return etm4x_is_ete(drvdata);
2501 	}
2502 
2503 	return false;
2504 }
2505 
2506 /*
2507  * Hide the ETM4x registers that may not be available on the
2508  * hardware.
2509  * There are certain management registers unavailable via system
2510  * instructions. Make those sysfs attributes hidden on such
2511  * systems.
2512  */
2513 static umode_t
2514 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2515 				     struct attribute *attr, int unused)
2516 {
2517 	struct device *dev = kobj_to_dev(kobj);
2518 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2519 	struct device_attribute *d_attr;
2520 	u32 offset;
2521 
2522 	d_attr = container_of(attr, struct device_attribute, attr);
2523 	offset = coresight_etm4x_attr_to_offset(d_attr);
2524 
2525 	if (etm4x_register_implemented(drvdata, offset))
2526 		return attr->mode;
2527 	return 0;
2528 }
2529 
2530 /*
2531  * Macro to set an RO ext attribute with offset and show function.
2532  * Offset is used in mgmt group to ensure only correct registers for
2533  * the ETM / ETE variant are visible.
2534  */
2535 #define coresight_etm4x_reg_showfn(name, offset, showfn) (	\
2536 	&((struct dev_ext_attribute[]) {			\
2537 	   {							\
2538 		__ATTR(name, 0444, showfn, NULL),		\
2539 		(void *)(unsigned long)offset			\
2540 	   }							\
2541 	})[0].attr.attr						\
2542 	)
2543 
2544 /* macro using the default coresight_etm4x_reg_show function */
2545 #define coresight_etm4x_reg(name, offset)	\
2546 	coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
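
/*
 * Editorial note: as defined above, coresight_etm4x_reg(trcpdcr, TRCPDCR)
 * evaluates to the attr member of a dev_ext_attribute built from
 * __ATTR(trcpdcr, 0444, coresight_etm4x_reg_show, NULL) with
 * var = (void *)(unsigned long)TRCPDCR.  This is how both
 * coresight_etm4x_reg_show() and coresight_etm4x_attr_reg_implemented()
 * recover the register offset via coresight_etm4x_attr_to_offset().
 */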
2547 
2548 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2549 	coresight_etm4x_reg(trcpdcr, TRCPDCR),
2550 	coresight_etm4x_reg(trcpdsr, TRCPDSR),
2551 	coresight_etm4x_reg(trclsr, TRCLSR),
2552 	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2553 	coresight_etm4x_reg(trcdevid, TRCDEVID),
2554 	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2555 	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2556 	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2557 	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2558 	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2559 	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2560 	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2561 	coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
2562 	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
2563 	NULL,
2564 };
2565 
2566 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2567 	coresight_etm4x_reg(trcidr0, TRCIDR0),
2568 	coresight_etm4x_reg(trcidr1, TRCIDR1),
2569 	coresight_etm4x_reg(trcidr2, TRCIDR2),
2570 	coresight_etm4x_reg(trcidr3, TRCIDR3),
2571 	coresight_etm4x_reg(trcidr4, TRCIDR4),
2572 	coresight_etm4x_reg(trcidr5, TRCIDR5),
2573 	/* trcidr[6,7] are reserved */
2574 	coresight_etm4x_reg(trcidr8, TRCIDR8),
2575 	coresight_etm4x_reg(trcidr9, TRCIDR9),
2576 	coresight_etm4x_reg(trcidr10, TRCIDR10),
2577 	coresight_etm4x_reg(trcidr11, TRCIDR11),
2578 	coresight_etm4x_reg(trcidr12, TRCIDR12),
2579 	coresight_etm4x_reg(trcidr13, TRCIDR13),
2580 	NULL,
2581 };
2582 
2583 static const struct attribute_group coresight_etmv4_group = {
2584 	.attrs = coresight_etmv4_attrs,
2585 };
2586 
2587 static const struct attribute_group coresight_etmv4_mgmt_group = {
2588 	.is_visible = coresight_etm4x_attr_reg_implemented,
2589 	.attrs = coresight_etmv4_mgmt_attrs,
2590 	.name = "mgmt",
2591 };
2592 
2593 static const struct attribute_group coresight_etmv4_trcidr_group = {
2594 	.attrs = coresight_etmv4_trcidr_attrs,
2595 	.name = "trcidr",
2596 };
2597 
2598 const struct attribute_group *coresight_etmv4_groups[] = {
2599 	&coresight_etmv4_group,
2600 	&coresight_etmv4_mgmt_group,
2601 	&coresight_etmv4_trcidr_group,
2602 	NULL,
2603 };
2604