xref: /linux/arch/powerpc/perf/isa207-common.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include "isa207-common.h"
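
/*
 * The two main entry points below are isa207_get_constraint(), which turns a
 * raw event code into a (mask, value) constraint pair used by the core
 * book3s perf code, and isa207_compute_mmcr(), which translates a chosen set
 * of events into MMCR0/MMCR1/MMCRA/MMCR2 settings.
 */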

static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

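/*
 * Compute the scheduling constraint for a single event: on success, return 0
 * and fill *maskp/*valp with a bitmask and value that the core perf code
 * combines across events to decide whether they can be counted together.
 * Return -1 if the event code itself is invalid.
 */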
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)       & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT)  & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> EVENT_EBB_SHIFT)        & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

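		/*
		 * PMC5 and PMC6 are effectively fixed-function: the only
		 * events accepted there are 0x500fa and 0x600f4
		 * (PM_RUN_INST_CMPL and PM_RUN_CYC on POWER8).
		 */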
		if (pmc >= 5 && base_event != 0x500fa &&
				base_event != 0x600f4)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero. The bank selector (bit 3) is
		 * irrelevant, as long as the rest of the value is 0.
		 */
		if (cache & 0x7)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC:
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

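		/*
		 * The threshold control/select/compare fields live in MMCRA
		 * and are shared by all counters, so every event in the group
		 * must agree on them.
		 */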
		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

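	/*
	 * Likewise the BHRB instruction filtering mode (IFM) is a single
	 * MMCRA field, so all events requesting BHRB must agree on it.
	 */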
	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask  |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_VAL(ebb);
	value |= CNST_EBB_MASK;

	*maskp = mask;
	*valp = value;

	return 0;
}

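/*
 * Assign events to PMCs and build the control register values: on return
 * mmcr[0..3] hold MMCR0, MMCR1, MMCRA and MMCR2 respectively, and hwc[i] is
 * the zero-based PMC chosen for event i.
 */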
int isa207_compute_mmcr(u64 event[], int n_ev,
			unsigned int hwc[], unsigned long mmcr[],
			struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

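		/*
		 * The event didn't name a PMC; take the lowest-numbered free
		 * one in 1-4. The constraint check should already have
		 * guaranteed that one is available.
		 */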
		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

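		/* Only PMC1-4 have unit/combine/pmcsel fields in MMCR1 */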
		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

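		/*
		 * Marked events turn on sampling in MMCRA; the event's sample
		 * field selects the sampling mode and eligibility.
		 */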
		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val &  3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

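		/*
		 * Honour the privilege exclusions with per-PMC freeze bits in
		 * MMCR2. When the kernel runs in hypervisor mode (bare metal),
		 * excluding the kernel means freezing HV state rather than
		 * supervisor state.
		 */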
		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

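	/*
	 * Enable counter-negative (overflow) conditions only for the PMCs
	 * actually in use: PMC1CE covers PMC1, PMCjCE covers PMC2-6.
	 */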
	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}

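/*
 * Stop a counter by clearing its event selector in MMCR1. Note that pmc is
 * zero-based here, and only PMC1-4 have selector fields to clear.
 */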
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
264