xref: /linux/arch/sh/kernel/cpu/sh4a/perf_event.c (revision 597473720f4dc69749542bfcfed4a927a43d935e)
1*add5ca2cSKuninori Morimoto // SPDX-License-Identifier: GPL-2.0
2ac44e669SPaul Mundt /*
3ac44e669SPaul Mundt  * Performance events support for SH-4A performance counters
4ac44e669SPaul Mundt  *
55e5b3a9dSPaul Mundt  *  Copyright (C) 2009, 2010  Paul Mundt
6ac44e669SPaul Mundt  */
7ac44e669SPaul Mundt #include <linux/kernel.h>
8ac44e669SPaul Mundt #include <linux/init.h>
9ac44e669SPaul Mundt #include <linux/io.h>
10ac44e669SPaul Mundt #include <linux/irq.h>
11ac44e669SPaul Mundt #include <linux/perf_event.h>
12ac44e669SPaul Mundt #include <asm/processor.h>
13ac44e669SPaul Mundt 
/*
 * Per-counter control (CCBR) and counter value (PMCTR) registers are
 * consecutive 32-bit words from a fixed base. The macro argument is
 * parenthesized so an expression argument such as PPC_CCBR(i + 1)
 * expands correctly (without the parentheses it would mis-expand to
 * sizeof(u32) * i + 1).
 */
#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * (idx)))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * (idx)))

#define CCBR_CIT_MASK	(0x7ff << 6)	/* event (counter input) select field */
#define CCBR_DUC	(1 << 3)	/* set on the enable paths, cleared on disable */
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)
21ac44e669SPaul Mundt 
225e5b3a9dSPaul Mundt #ifdef CONFIG_CPU_SHX3
235e5b3a9dSPaul Mundt /*
245e5b3a9dSPaul Mundt  * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
 * and PMCTR locations remain tentatively constant. This change remains
265e5b3a9dSPaul Mundt  * wholly undocumented, and was simply found through trial and error.
275e5b3a9dSPaul Mundt  *
285e5b3a9dSPaul Mundt  * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
295e5b3a9dSPaul Mundt  * it's unclear when this ceased to be the case. For now we always use
305e5b3a9dSPaul Mundt  * the new location (if future parts keep up with this trend then
315e5b3a9dSPaul Mundt  * scanning for them at runtime also remains a viable option.)
325e5b3a9dSPaul Mundt  *
335e5b3a9dSPaul Mundt  * The gap in the register space also suggests that there are other
345e5b3a9dSPaul Mundt  * undocumented counters, so this will need to be revisited at a later
355e5b3a9dSPaul Mundt  * point in time.
365e5b3a9dSPaul Mundt  */
375e5b3a9dSPaul Mundt #define PPC_PMCAT	0xfc100240
385e5b3a9dSPaul Mundt #else
39ac44e669SPaul Mundt #define PPC_PMCAT	0xfc100080
405e5b3a9dSPaul Mundt #endif
41ac44e669SPaul Mundt 
42ac44e669SPaul Mundt #define PMCAT_OVF3	(1 << 27)
43ac44e669SPaul Mundt #define PMCAT_CNN3	(1 << 26)
44ac44e669SPaul Mundt #define PMCAT_CLR3	(1 << 25)
45ac44e669SPaul Mundt #define PMCAT_OVF2	(1 << 19)
46ac44e669SPaul Mundt #define PMCAT_CLR2	(1 << 17)
47ac44e669SPaul Mundt #define PMCAT_OVF1	(1 << 11)
48ac44e669SPaul Mundt #define PMCAT_CNN1	(1 << 10)
49ac44e669SPaul Mundt #define PMCAT_CLR1	(1 << 9)
50ac44e669SPaul Mundt #define PMCAT_OVF0	(1 << 3)
51ac44e669SPaul Mundt #define PMCAT_CLR0	(1 << 1)
52ac44e669SPaul Mundt 
53ac44e669SPaul Mundt static struct sh_pmu sh4a_pmu;
54ac44e669SPaul Mundt 
55ac44e669SPaul Mundt /*
560fe69d77SPaul Mundt  * Supported raw event codes:
570fe69d77SPaul Mundt  *
580fe69d77SPaul Mundt  *	Event Code	Description
590fe69d77SPaul Mundt  *	----------	-----------
600fe69d77SPaul Mundt  *
610fe69d77SPaul Mundt  *	0x0000		number of elapsed cycles
620fe69d77SPaul Mundt  *	0x0200		number of elapsed cycles in privileged mode
630fe69d77SPaul Mundt  *	0x0280		number of elapsed cycles while SR.BL is asserted
640fe69d77SPaul Mundt  *	0x0202		instruction execution
650fe69d77SPaul Mundt  *	0x0203		instruction execution in parallel
660fe69d77SPaul Mundt  *	0x0204		number of unconditional branches
670fe69d77SPaul Mundt  *	0x0208		number of exceptions
680fe69d77SPaul Mundt  *	0x0209		number of interrupts
690fe69d77SPaul Mundt  *	0x0220		UTLB miss caused by instruction fetch
700fe69d77SPaul Mundt  *	0x0222		UTLB miss caused by operand access
710fe69d77SPaul Mundt  *	0x02a0		number of ITLB misses
720fe69d77SPaul Mundt  *	0x0028		number of accesses to instruction memories
730fe69d77SPaul Mundt  *	0x0029		number of accesses to instruction cache
740fe69d77SPaul Mundt  *	0x002a		instruction cache miss
750fe69d77SPaul Mundt  *	0x022e		number of access to instruction X/Y memory
760fe69d77SPaul Mundt  *	0x0030		number of reads to operand memories
770fe69d77SPaul Mundt  *	0x0038		number of writes to operand memories
780fe69d77SPaul Mundt  *	0x0031		number of operand cache read accesses
790fe69d77SPaul Mundt  *	0x0039		number of operand cache write accesses
800fe69d77SPaul Mundt  *	0x0032		operand cache read miss
810fe69d77SPaul Mundt  *	0x003a		operand cache write miss
820fe69d77SPaul Mundt  *	0x0236		number of reads to operand X/Y memory
830fe69d77SPaul Mundt  *	0x023e		number of writes to operand X/Y memory
840fe69d77SPaul Mundt  *	0x0237		number of reads to operand U memory
850fe69d77SPaul Mundt  *	0x023f		number of writes to operand U memory
860fe69d77SPaul Mundt  *	0x0337		number of U memory read buffer misses
870fe69d77SPaul Mundt  *	0x02b4		number of wait cycles due to operand read access
880fe69d77SPaul Mundt  *	0x02bc		number of wait cycles due to operand write access
890fe69d77SPaul Mundt  *	0x0033		number of wait cycles due to operand cache read miss
900fe69d77SPaul Mundt  *	0x003b		number of wait cycles due to operand cache write miss
910fe69d77SPaul Mundt  */
920fe69d77SPaul Mundt 
930fe69d77SPaul Mundt /*
94ac44e669SPaul Mundt  * Special reserved bits used by hardware emulators, read values will
95ac44e669SPaul Mundt  * vary, but writes must always be 0.
96ac44e669SPaul Mundt  */
97ac44e669SPaul Mundt #define PMCAT_EMU_CLR_MASK	((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
98ac44e669SPaul Mundt 
/*
 * Mapping from the generic PERF_COUNT_HW_* event indices to the SH-4A
 * raw event codes documented in the table above; -1 marks events this
 * PMU cannot count in hardware.
 */
static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,
	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
};
108ac44e669SPaul Mundt 
109ac44e669SPaul Mundt #define C(x)	PERF_COUNT_HW_CACHE_##x
110ac44e669SPaul Mundt 
/*
 * Generic cache event map, indexed [cache][op][result] and yielding a
 * raw event code from the table above (e.g. 0x0031/0x0032 are operand
 * cache read access/miss). By the conventions visible here, 0 means no
 * matching counter exists and -1 means the combination is unsupported.
 */
static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,
			[ C(RESULT_MISS)   ] = 0x0032,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,
			[ C(RESULT_MISS)   ] = 0x003a,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,
			[ C(RESULT_MISS)   ] = 0x002a,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,
			[ C(RESULT_MISS)   ] = 0x0220,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0x02a0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
221ac44e669SPaul Mundt 
sh4a_event_map(int event)222ac44e669SPaul Mundt static int sh4a_event_map(int event)
223ac44e669SPaul Mundt {
224ac44e669SPaul Mundt 	return sh4a_general_events[event];
225ac44e669SPaul Mundt }
226ac44e669SPaul Mundt 
sh4a_pmu_read(int idx)227ac44e669SPaul Mundt static u64 sh4a_pmu_read(int idx)
228ac44e669SPaul Mundt {
229ac44e669SPaul Mundt 	return __raw_readl(PPC_PMCTR(idx));
230ac44e669SPaul Mundt }
231ac44e669SPaul Mundt 
sh4a_pmu_disable(struct hw_perf_event * hwc,int idx)232ac44e669SPaul Mundt static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
233ac44e669SPaul Mundt {
234ac44e669SPaul Mundt 	unsigned int tmp;
235ac44e669SPaul Mundt 
236ac44e669SPaul Mundt 	tmp = __raw_readl(PPC_CCBR(idx));
237ac44e669SPaul Mundt 	tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
238ac44e669SPaul Mundt 	__raw_writel(tmp, PPC_CCBR(idx));
239ac44e669SPaul Mundt }
240ac44e669SPaul Mundt 
/*
 * Program and start counter @idx for the event selected in
 * hwc->config.
 *
 * The write ordering matters: the counter is first cleared through
 * PMCAT, then the event code and control bits are set in CCBR, and
 * only then is CCBR_DUC set by a separate final write.
 */
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	/*
	 * Clear this counter via PMCAT. The emulator-reserved bits
	 * (PMCAT_EMU_CLR_MASK) must always be written as 0; only two
	 * counters are handled here (CLR1 for idx 1, CLR0 for idx 0),
	 * matching num_events = 2 below.
	 */
	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	/* Select the event: hwc->config lands in the CIT field at bit 6. */
	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	/* Setting DUC in a separate, final write starts the counter. */
	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
256ac44e669SPaul Mundt 
sh4a_pmu_disable_all(void)257ac44e669SPaul Mundt static void sh4a_pmu_disable_all(void)
258ac44e669SPaul Mundt {
259ac44e669SPaul Mundt 	int i;
260ac44e669SPaul Mundt 
261ac44e669SPaul Mundt 	for (i = 0; i < sh4a_pmu.num_events; i++)
262ac44e669SPaul Mundt 		__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
263ac44e669SPaul Mundt }
264ac44e669SPaul Mundt 
sh4a_pmu_enable_all(void)265ac44e669SPaul Mundt static void sh4a_pmu_enable_all(void)
266ac44e669SPaul Mundt {
267ac44e669SPaul Mundt 	int i;
268ac44e669SPaul Mundt 
269ac44e669SPaul Mundt 	for (i = 0; i < sh4a_pmu.num_events; i++)
270ac44e669SPaul Mundt 		__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
271ac44e669SPaul Mundt }
272ac44e669SPaul Mundt 
/*
 * SH-4A PMU description handed to the common SH perf code via
 * register_sh_pmu().
 */
static struct sh_pmu sh4a_pmu = {
	.name		= "sh4a",
	.num_events	= 2,	/* two counters; matches the CLR0/CLR1 handling in the enable path */
	.event_map	= sh4a_event_map,
	.max_events	= ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask	= 0x3ff,
	.cache_events	= &sh4a_cache_events,
	.read		= sh4a_pmu_read,
	.disable	= sh4a_pmu_disable,
	.enable		= sh4a_pmu_enable,
	.disable_all	= sh4a_pmu_disable_all,
	.enable_all	= sh4a_pmu_enable_all,
};
286ac44e669SPaul Mundt 
sh4a_pmu_init(void)287ac44e669SPaul Mundt static int __init sh4a_pmu_init(void)
288ac44e669SPaul Mundt {
289ac44e669SPaul Mundt 	/*
290ac44e669SPaul Mundt 	 * Make sure this CPU actually has perf counters.
291ac44e669SPaul Mundt 	 */
292ac44e669SPaul Mundt 	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
293ac44e669SPaul Mundt 		pr_notice("HW perf events unsupported, software events only.\n");
294ac44e669SPaul Mundt 		return -ENODEV;
295ac44e669SPaul Mundt 	}
296ac44e669SPaul Mundt 
297ac44e669SPaul Mundt 	return register_sh_pmu(&sh4a_pmu);
298ac44e669SPaul Mundt }
299004417a6SPeter Zijlstra early_initcall(sh4a_pmu_init);
300