/*
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or any later version.
 */

#ifndef _LINUX_POWERPC_PERF_ISA207_COMMON_H_
#define _LINUX_POWERPC_PERF_ISA207_COMMON_H_

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/cputable.h>

/*
 * Raw event encoding for PowerISA v2.07:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [    thresh_cmp    ]     [   thresh_ctl   ]
 *   | |  |                                                          |
 *   | |  *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           |                          |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel      |
 *     |        |                                      |
 *     |        *- sampling mode for marked events     *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 */

#define EVENT_EBB_MASK		1ull
#define EVENT_EBB_SHIFT		PERF_EVENT_CONFIG_EBB_SHIFT
#define EVENT_BHRB_MASK		1ull
#define EVENT_BHRB_SHIFT	62
#define EVENT_WANTS_BHRB	(EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
#define EVENT_IFM_MASK		3ull
#define EVENT_IFM_SHIFT		60
#define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
#define EVENT_THR_CMP_MASK	0x3ff
#define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
#define EVENT_THR_CTL_MASK	0xffull
#define EVENT_THR_SEL_SHIFT	29	/* Threshold select value */
#define EVENT_THR_SEL_MASK	0x7
#define EVENT_THRESH_SHIFT	29	/* All threshold bits */
#define EVENT_THRESH_MASK	0x1fffffull
#define EVENT_SAMPLE_SHIFT	24	/* Sampling mode & eligibility */
#define EVENT_SAMPLE_MASK	0x1f
#define EVENT_CACHE_SEL_SHIFT	20	/* L2/L3 cache select */
#define EVENT_CACHE_SEL_MASK	0xf
#define EVENT_IS_L1		(4 << EVENT_CACHE_SEL_SHIFT)
#define EVENT_PMC_SHIFT		16	/* PMC number (1-based) */
#define EVENT_PMC_MASK		0xf
#define EVENT_UNIT_SHIFT	12	/* Unit */
#define EVENT_UNIT_MASK		0xf
#define EVENT_COMBINE_SHIFT	11	/* Combine bit */
#define EVENT_COMBINE_MASK	0x1
#define EVENT_COMBINE(v)	(((v) >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK)
#define EVENT_MARKED_SHIFT	8	/* Marked bit */
#define EVENT_MARKED_MASK	0x1
#define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */
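
/*
 * Illustrative only (not used by the kernel build): the fields described in
 * the encoding diagram above are extracted from a raw event code with the
 * shift/mask pairs defined here. The local variable names below are
 * hypothetical, for this sketch only; the authoritative decode lives in
 * isa207-common.c.
 *
 *	u64 event = ...;	// raw event code from perf_event_attr::config
 *
 *	unsigned int pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
 *	unsigned int unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
 *	unsigned int cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
 *	unsigned int psel  = event & EVENT_PSEL_MASK;
 */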

/* Bits defined by Linux */
#define EVENT_LINUX_MASK	\
	((EVENT_EBB_MASK  << EVENT_EBB_SHIFT)	|	\
	 (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)	|	\
	 (EVENT_IFM_MASK  << EVENT_IFM_SHIFT))

#define EVENT_VALID_MASK	\
	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	|	\
	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		|	\
	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
	  EVENT_LINUX_MASK					|	\
	  EVENT_PSEL_MASK)

#define ONLY_PLM	\
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

/* Constants to support power9 raw encoding format */
#define p9_EVENT_COMBINE_SHIFT	10	/* Combine bit */
#define p9_EVENT_COMBINE_MASK	0x3ull
#define p9_EVENT_COMBINE(v)	(((v) >> p9_EVENT_COMBINE_SHIFT) & p9_EVENT_COMBINE_MASK)
#define p9_SDAR_MODE_SHIFT	50
#define p9_SDAR_MODE_MASK	0x3ull
#define p9_SDAR_MODE(v)		(((v) >> p9_SDAR_MODE_SHIFT) & p9_SDAR_MODE_MASK)

#define p9_EVENT_VALID_MASK	\
	((p9_SDAR_MODE_MASK     << p9_SDAR_MODE_SHIFT)		|	\
	 (EVENT_THRESH_MASK     << EVENT_THRESH_SHIFT)		|	\
	 (EVENT_SAMPLE_MASK     << EVENT_SAMPLE_SHIFT)		|	\
	 (EVENT_CACHE_SEL_MASK  << EVENT_CACHE_SEL_SHIFT)	|	\
	 (EVENT_PMC_MASK        << EVENT_PMC_SHIFT)		|	\
	 (EVENT_UNIT_MASK       << EVENT_UNIT_SHIFT)		|	\
	 (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT)	|	\
	 (EVENT_MARKED_MASK     << EVENT_MARKED_SHIFT)		|	\
	  EVENT_LINUX_MASK					|	\
	  EVENT_PSEL_MASK)

/*
 * Layout of constraint bits:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   fab_match   ]         [       thresh_cmp      ] [   thresh_ctl    ] [   ]
 *                                                                             |
 *                                                                thresh_sel -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *               [ ] |   [ ]   [  sample ]   [   ]   [6] [5]   [4] [3]   [2] [1]
 *                |  |    |                    |
 *      BHRB IFM -*  |    |                    |       Count of events for each PMC.
 *           EBB -*  |    |                            p1, p2, p3, p4, p5, p6.
 *  L1 I/D qualifier -*   |
 *      nc - number of counters -*
 *
 * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
 * we want the low bit of each field to be added to any existing value.
 *
 * Everything else is a value field.
 */
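
/*
 * Illustrative only: a sketch of how an adder field behaves, using the
 * CNST_PMC_* macros defined below (the real accumulation and checking is
 * done by the generic powerpc perf core, not by this header). Two events
 * that both ask for PMC1 each contribute the low bit of the two-bit PMC1
 * field; the sum overflows into the high bit, which is exactly the bit
 * CNST_PMC_MASK(1) tests:
 *
 *	u64 sum = CNST_PMC_VAL(1) + CNST_PMC_VAL(1);	// 0b01 + 0b01 = 0b10
 *	if (sum & CNST_PMC_MASK(1))
 *		;	// conflict: both events want PMC1
 */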

#define CNST_FAB_MATCH_VAL(v)	(((v) & EVENT_THR_CTL_MASK) << 56)
#define CNST_FAB_MATCH_MASK	CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)

/* We just throw all the threshold bits into the constraint */
#define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)

#define CNST_EBB_VAL(v)		(((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK		CNST_EBB_VAL(EVENT_EBB_MASK)

#define CNST_IFM_VAL(v)		(((v) & EVENT_IFM_MASK) << 25)
#define CNST_IFM_MASK		CNST_IFM_VAL(EVENT_IFM_MASK)

#define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
#define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)

#define CNST_SAMPLE_VAL(v)	(((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK	CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)

/*
 * For NC we are counting up to 4 events. This requires three bits, and we need
 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
 * fields by 3 in test_adder.
 */
#define CNST_NC_SHIFT		12
#define CNST_NC_VAL		(1 << CNST_NC_SHIFT)
#define CNST_NC_MASK		(8 << CNST_NC_SHIFT)
#define ISA207_TEST_ADDER	(3 << CNST_NC_SHIFT)

/*
 * For the per-PMC fields we have two bits. The low bit is added, so if two
 * events ask for the same PMC the sum will overflow, setting the high bit,
 * indicating an error. So our mask sets the high bit.
 */
#define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
#define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
#define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))

/* Our add_fields is defined as: */
#define ISA207_ADD_FIELDS	\
	CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
	CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL

/*
 * Let's restrict use of PMC5 for instruction counting.
 */
#define P9_DD1_TEST_ADDER	(ISA207_TEST_ADDER | CNST_PMC_VAL(5))

/* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
#define MMCR1_FAB_SHIFT			36
#define MMCR1_DC_QUAL_SHIFT		47
#define MMCR1_IC_QUAL_SHIFT		46

/* MMCR1 Combine bits macro for power9 */
#define p9_MMCR1_COMBINE_SHIFT(pmc)	(38 - ((pmc - 1) * 2))

/* Bits in MMCRA for PowerISA v2.07 */
#define MMCRA_SAMP_MODE_SHIFT		1
#define MMCRA_SAMP_ELIG_SHIFT		4
#define MMCRA_THR_CTL_SHIFT		8
#define MMCRA_THR_SEL_SHIFT		16
#define MMCRA_THR_CMP_SHIFT		32
#define MMCRA_SDAR_MODE_SHIFT		42
#define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT			30
#define MMCRA_THR_CTR_MANT_SHIFT	19
#define MMCRA_THR_CTR_MANT_MASK		0x7Ful
#define MMCRA_THR_CTR_MANT(v)		(((v) >> MMCRA_THR_CTR_MANT_SHIFT) &\
						MMCRA_THR_CTR_MANT_MASK)

#define MMCRA_THR_CTR_EXP_SHIFT		27
#define MMCRA_THR_CTR_EXP_MASK		0x7ul
#define MMCRA_THR_CTR_EXP(v)		(((v) >> MMCRA_THR_CTR_EXP_SHIFT) &\
						MMCRA_THR_CTR_EXP_MASK)
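
/*
 * Illustrative only: the threshold event counter is read back as a mantissa
 * and an exponent via the two macros above. A sketch of combining them into
 * a single count, assuming the value is the mantissa scaled by 4^exponent
 * (see isa207_get_mem_weight() in isa207-common.c for the in-tree use):
 *
 *	u64 mmcra    = mfspr(SPRN_MMCRA);
 *	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
 *	u64 exp      = MMCRA_THR_CTR_EXP(mmcra);
 *	u64 count    = mantissa << (2 * exp);	// mantissa * 4^exp
 */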

/* MMCRA Threshold Compare bit constant for power9 */
#define p9_MMCRA_THR_CMP_SHIFT	45

/* Bits in MMCR2 for PowerISA v2.07 */
#define MMCR2_FCS(pmc)			(1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc)			(1ull << (62 - (((pmc) - 1) * 9)))
#define MMCR2_FCH(pmc)			(1ull << (57 - (((pmc) - 1) * 9)))

#define MAX_ALT				2
#define MAX_PMU_COUNTERS		6

#define ISA207_SIER_TYPE_SHIFT		15
#define ISA207_SIER_TYPE_MASK		(0x7ull << ISA207_SIER_TYPE_SHIFT)

#define ISA207_SIER_LDST_SHIFT		1
#define ISA207_SIER_LDST_MASK		(0x7ull << ISA207_SIER_LDST_SHIFT)

#define ISA207_SIER_DATA_SRC_SHIFT	53
#define ISA207_SIER_DATA_SRC_MASK	(0x7ull << ISA207_SIER_DATA_SRC_SHIFT)

#define P(a, b)				PERF_MEM_S(a, b)
#define PH(a, b)			(P(LVL, HIT) | P(a, b))
#define PM(a, b)			(P(LVL, MISS) | P(a, b))

int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
int isa207_compute_mmcr(u64 event[], int n_ev,
				unsigned int hwc[], unsigned long mmcr[],
				struct perf_event *pevents[]);
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
int isa207_get_alternatives(u64 event, u64 alt[],
				const unsigned int ev_alt[][MAX_ALT], int size);
void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
							struct pt_regs *regs);
void isa207_get_mem_weight(u64 *weight);

#endif