1 /* SandyBridge-EP/IvyTown uncore support */ 2 #include "uncore.h" 3 4 /* SNB-EP Box level control */ 5 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) 6 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) 7 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) 8 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) 9 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ 10 SNBEP_PMON_BOX_CTL_RST_CTRS | \ 11 SNBEP_PMON_BOX_CTL_FRZ_EN) 12 /* SNB-EP event control */ 13 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff 14 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 15 #define SNBEP_PMON_CTL_RST (1 << 17) 16 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18) 17 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) 18 #define SNBEP_PMON_CTL_EN (1 << 22) 19 #define SNBEP_PMON_CTL_INVERT (1 << 23) 20 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 21 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ 22 SNBEP_PMON_CTL_UMASK_MASK | \ 23 SNBEP_PMON_CTL_EDGE_DET | \ 24 SNBEP_PMON_CTL_INVERT | \ 25 SNBEP_PMON_CTL_TRESH_MASK) 26 27 /* SNB-EP Ubox event control */ 28 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 29 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ 30 (SNBEP_PMON_CTL_EV_SEL_MASK | \ 31 SNBEP_PMON_CTL_UMASK_MASK | \ 32 SNBEP_PMON_CTL_EDGE_DET | \ 33 SNBEP_PMON_CTL_INVERT | \ 34 SNBEP_U_MSR_PMON_CTL_TRESH_MASK) 35 36 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) 37 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ 38 SNBEP_CBO_PMON_CTL_TID_EN) 39 40 /* SNB-EP PCU event control */ 41 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 42 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 43 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) 44 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) 45 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ 46 (SNBEP_PMON_CTL_EV_SEL_MASK | \ 47 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ 48 SNBEP_PMON_CTL_EDGE_DET | \ 49 SNBEP_PMON_CTL_EV_SEL_EXT | \ 50 SNBEP_PMON_CTL_INVERT | \ 51 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ 52 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ 53 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) 54 55 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ 56 (SNBEP_PMON_RAW_EVENT_MASK | \ 57 SNBEP_PMON_CTL_EV_SEL_EXT) 58 59 /* SNB-EP pci control register */ 60 #define SNBEP_PCI_PMON_BOX_CTL 0xf4 61 #define SNBEP_PCI_PMON_CTL0 0xd8 62 /* SNB-EP pci counter register */ 63 #define SNBEP_PCI_PMON_CTR0 0xa0 64 65 /* SNB-EP home agent register */ 66 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 67 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 68 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 69 /* SNB-EP memory controller register */ 70 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 71 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 72 /* SNB-EP QPI register */ 73 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 74 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c 75 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 76 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c 77 78 /* SNB-EP Ubox register */ 79 #define SNBEP_U_MSR_PMON_CTR0 0xc16 80 #define SNBEP_U_MSR_PMON_CTL0 0xc10 81 82 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 83 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 84 85 /* SNB-EP Cbo register */ 86 #define SNBEP_C0_MSR_PMON_CTR0 0xd16 87 #define SNBEP_C0_MSR_PMON_CTL0 0xd10 88 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 89 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 90 #define SNBEP_CBO_MSR_OFFSET 0x20 91 92 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f 93 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00 94 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000 95 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000 96 97 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \ 98 .event = (e), \ 99 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \ 100 .config_mask = (m), \ 101 .idx = (i) \ 102 } 103 104 /* SNB-EP PCU register */ 105 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36 106 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30 107 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 108 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 109 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff 110 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc 111 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd 112 113 /* IVBEP event control */ 114 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ 115 SNBEP_PMON_BOX_CTL_RST_CTRS) 116 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ 117 SNBEP_PMON_CTL_UMASK_MASK | \ 118 SNBEP_PMON_CTL_EDGE_DET | \ 119 SNBEP_PMON_CTL_TRESH_MASK) 120 /* IVBEP Ubox */ 121 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00 122 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31) 123 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29) 124 125 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \ 126 (SNBEP_PMON_CTL_EV_SEL_MASK | \ 127 SNBEP_PMON_CTL_UMASK_MASK | \ 128 SNBEP_PMON_CTL_EDGE_DET | \ 129 SNBEP_U_MSR_PMON_CTL_TRESH_MASK) 130 /* IVBEP Cbo */ 131 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \ 132 SNBEP_CBO_PMON_CTL_TID_EN) 133 134 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0) 135 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5) 136 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17) 137 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) 138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) 139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) 140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) 141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63) 142 143 /* IVBEP home agent */ 144 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16) 145 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \ 146 (IVBEP_PMON_RAW_EVENT_MASK | \ 147 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST) 148 /* IVBEP PCU */ 149 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ 150 (SNBEP_PMON_CTL_EV_SEL_MASK | \ 151 SNBEP_PMON_CTL_EV_SEL_EXT | \ 152 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ 153 SNBEP_PMON_CTL_EDGE_DET | \ 154 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ 155 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ 156 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) 157 /* IVBEP QPI */ 158 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ 159 (IVBEP_PMON_RAW_EVENT_MASK | \ 160 SNBEP_PMON_CTL_EV_SEL_EXT) 161 162 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ 163 ((1ULL << (n)) - 1))) 164 165 /* Haswell-EP Ubox */ 166 #define HSWEP_U_MSR_PMON_CTR0 0x709 167 #define HSWEP_U_MSR_PMON_CTL0 0x705 168 #define HSWEP_U_MSR_PMON_FILTER 0x707 169 170 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703 171 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704 172 173 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0) 174 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1) 175 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \ 176 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \ 177 HSWEP_U_MSR_PMON_BOX_FILTER_CID) 178 179 /* Haswell-EP CBo */ 180 #define HSWEP_C0_MSR_PMON_CTR0 0xe08 181 #define HSWEP_C0_MSR_PMON_CTL0 0xe01 182 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00 183 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05 184 #define HSWEP_CBO_MSR_OFFSET 0x10 185 186 187 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0) 188 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6) 189 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17) 190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32) 191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52) 192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) 193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) 194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63) 195 196 197 /* Haswell-EP Sbox */ 198 #define HSWEP_S0_MSR_PMON_CTR0 0x726 199 #define HSWEP_S0_MSR_PMON_CTL0 0x721 200 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720 201 #define HSWEP_SBOX_MSR_OFFSET 0xa 202 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ 203 SNBEP_CBO_PMON_CTL_TID_EN) 204 205 /* Haswell-EP PCU */ 206 #define HSWEP_PCU_MSR_PMON_CTR0 0x717 207 #define HSWEP_PCU_MSR_PMON_CTL0 0x711 208 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710 209 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715 210 211 /* KNL Ubox */ 212 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \ 213 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \ 214 SNBEP_CBO_PMON_CTL_TID_EN) 215 /* KNL CHA */ 216 #define KNL_CHA_MSR_OFFSET 0xc 217 #define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16) 218 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \ 219 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \ 220 KNL_CHA_MSR_PMON_CTL_QOR) 221 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff 222 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18) 223 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32) 224 225 /* KNL EDC/MC UCLK */ 226 #define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400 227 #define KNL_UCLK_MSR_PMON_CTL0 0x420 228 #define KNL_UCLK_MSR_PMON_BOX_CTL 0x430 229 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c 230 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454 231 #define KNL_PMON_FIXED_CTL_EN 0x1 232 233 /* KNL EDC */ 234 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00 235 #define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20 236 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30 237 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c 238 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44 239 240 /* KNL MC */ 241 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00 242 #define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20 243 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30 244 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c 245 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44 246 247 /* KNL IRP */ 248 #define KNL_IRP_PCI_PMON_BOX_CTL 0xf0 249 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ 250 KNL_CHA_MSR_PMON_CTL_QOR) 251 /* KNL PCU */ 252 #define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f 253 #define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7) 254 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000 255 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \ 256 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \ 257 KNL_PCU_PMON_CTL_USE_OCC_CTR | \ 258 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ 259 SNBEP_PMON_CTL_EDGE_DET | \ 260 SNBEP_CBO_PMON_CTL_TID_EN | \ 261 SNBEP_PMON_CTL_EV_SEL_EXT | \ 262 SNBEP_PMON_CTL_INVERT | \ 263 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \ 264 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ 265 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) 266 267 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 268 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); 269 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 270 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); 271 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 272 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); 273 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); 274 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); 275 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); 276 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); 277 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29"); 278 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); 279 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); 280 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); 281 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); 282 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31"); 283 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); 284 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0"); 285 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5"); 286 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8"); 287 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5"); 288 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); 289 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); 290 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12"); 291 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); 292 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); 293 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); 294 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); 295 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23"); 296 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20"); 297 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33"); 298 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35"); 299 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37"); 300 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); 301 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); 302 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60"); 303 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62"); 304 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61"); 305 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63"); 306 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); 307 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); 308 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); 309 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); 310 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); 311 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); 312 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); 313 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); 314 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); 315 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); 316 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); 317 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); 318 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); 319 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); 320 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); 321 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); 322 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); 323 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); 324 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); 325 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); 326 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); 327 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); 328 329 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) 330 { 331 struct pci_dev *pdev = box->pci_dev; 332 int box_ctl = uncore_pci_box_ctl(box); 333 u32 config = 0; 334 335 if (!pci_read_config_dword(pdev, box_ctl, &config)) { 336 config |= SNBEP_PMON_BOX_CTL_FRZ; 337 pci_write_config_dword(pdev, box_ctl, config); 338 } 339 } 340 341 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) 342 { 343 struct pci_dev *pdev = box->pci_dev; 344 int box_ctl = uncore_pci_box_ctl(box); 345 u32 config = 0; 346 347 if (!pci_read_config_dword(pdev, box_ctl, &config)) { 348 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 349 pci_write_config_dword(pdev, box_ctl, config); 350 } 351 } 352 353 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) 354 { 355 struct pci_dev *pdev = box->pci_dev; 356 struct hw_perf_event *hwc = &event->hw; 357 358 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 359 } 360 361 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) 362 { 363 struct pci_dev *pdev = box->pci_dev; 364 struct hw_perf_event *hwc = &event->hw; 365 366 pci_write_config_dword(pdev, hwc->config_base, hwc->config); 367 } 368 369 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) 370 { 371 struct pci_dev *pdev = box->pci_dev; 372 struct hw_perf_event *hwc = &event->hw; 373 u64 count = 0; 374 375 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); 376 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); 377 378 return count; 379 } 380 381 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) 382 { 383 struct pci_dev *pdev = box->pci_dev; 384 int box_ctl = uncore_pci_box_ctl(box); 385 386 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT); 387 } 388 389 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) 390 { 391 u64 config; 392 unsigned msr; 393 394 msr = uncore_msr_box_ctl(box); 395 if (msr) { 396 rdmsrl(msr, config); 397 config |= SNBEP_PMON_BOX_CTL_FRZ; 398 wrmsrl(msr, config); 399 } 400 } 401 402 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) 403 { 404 u64 config; 405 unsigned msr; 406 407 msr = uncore_msr_box_ctl(box); 408 if (msr) { 409 rdmsrl(msr, config); 410 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 411 wrmsrl(msr, config); 412 } 413 } 414 415 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 416 { 417 struct hw_perf_event *hwc = &event->hw; 418 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 419 420 if (reg1->idx != EXTRA_REG_NONE) 421 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); 422 423 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 424 } 425 426 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, 427 struct perf_event *event) 428 { 429 struct hw_perf_event *hwc = &event->hw; 430 431 wrmsrl(hwc->config_base, hwc->config); 432 } 433 434 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) 435 { 436 unsigned msr = uncore_msr_box_ctl(box); 437 438 if (msr) 439 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); 440 } 441 442 static struct attribute *snbep_uncore_formats_attr[] = { 443 &format_attr_event.attr, 444 &format_attr_umask.attr, 445 &format_attr_edge.attr, 446 &format_attr_inv.attr, 447 &format_attr_thresh8.attr, 448 NULL, 449 }; 450 451 static struct attribute *snbep_uncore_ubox_formats_attr[] = { 452 &format_attr_event.attr, 453 &format_attr_umask.attr, 454 &format_attr_edge.attr, 455 &format_attr_inv.attr, 456 &format_attr_thresh5.attr, 457 NULL, 458 }; 459 460 static struct attribute *snbep_uncore_cbox_formats_attr[] = { 461 &format_attr_event.attr, 462 &format_attr_umask.attr, 463 &format_attr_edge.attr, 464 &format_attr_tid_en.attr, 465 &format_attr_inv.attr, 466 &format_attr_thresh8.attr, 467 &format_attr_filter_tid.attr, 468 &format_attr_filter_nid.attr, 469 &format_attr_filter_state.attr, 470 &format_attr_filter_opc.attr, 471 NULL, 472 }; 473 474 static struct attribute *snbep_uncore_pcu_formats_attr[] = { 475 &format_attr_event_ext.attr, 476 &format_attr_occ_sel.attr, 477 &format_attr_edge.attr, 478 &format_attr_inv.attr, 479 &format_attr_thresh5.attr, 480 &format_attr_occ_invert.attr, 481 &format_attr_occ_edge.attr, 482 &format_attr_filter_band0.attr, 483 &format_attr_filter_band1.attr, 484 &format_attr_filter_band2.attr, 485 &format_attr_filter_band3.attr, 486 NULL, 487 }; 488 489 static struct attribute *snbep_uncore_qpi_formats_attr[] = { 490 &format_attr_event_ext.attr, 491 &format_attr_umask.attr, 492 &format_attr_edge.attr, 493 &format_attr_inv.attr, 494 &format_attr_thresh8.attr, 495 &format_attr_match_rds.attr, 496 &format_attr_match_rnid30.attr, 497 &format_attr_match_rnid4.attr, 498 &format_attr_match_dnid.attr, 499 &format_attr_match_mc.attr, 500 &format_attr_match_opc.attr, 501 &format_attr_match_vnw.attr, 502 &format_attr_match0.attr, 503 &format_attr_match1.attr, 504 &format_attr_mask_rds.attr, 505 &format_attr_mask_rnid30.attr, 506 &format_attr_mask_rnid4.attr, 507 &format_attr_mask_dnid.attr, 508 &format_attr_mask_mc.attr, 509 &format_attr_mask_opc.attr, 510 &format_attr_mask_vnw.attr, 511 &format_attr_mask0.attr, 512 &format_attr_mask1.attr, 513 NULL, 514 }; 515 516 static struct uncore_event_desc snbep_uncore_imc_events[] = { 517 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), 518 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), 519 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), 520 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), 521 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), 522 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), 523 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), 524 { /* end: all zeroes */ }, 525 }; 526 527 static struct uncore_event_desc snbep_uncore_qpi_events[] = { 528 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 529 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 530 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), 531 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), 532 { /* end: all zeroes */ }, 533 }; 534 535 static struct attribute_group snbep_uncore_format_group = { 536 .name = "format", 537 .attrs = snbep_uncore_formats_attr, 538 }; 539 540 static struct attribute_group snbep_uncore_ubox_format_group = { 541 .name = "format", 542 .attrs = snbep_uncore_ubox_formats_attr, 543 }; 544 545 static struct attribute_group snbep_uncore_cbox_format_group = { 546 .name = "format", 547 .attrs = snbep_uncore_cbox_formats_attr, 548 }; 549 550 static struct attribute_group snbep_uncore_pcu_format_group = { 551 .name = "format", 552 .attrs = snbep_uncore_pcu_formats_attr, 553 }; 554 555 static struct attribute_group snbep_uncore_qpi_format_group = { 556 .name = "format", 557 .attrs = snbep_uncore_qpi_formats_attr, 558 }; 559 560 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 561 .disable_box = snbep_uncore_msr_disable_box, \ 562 .enable_box = snbep_uncore_msr_enable_box, \ 563 .disable_event = snbep_uncore_msr_disable_event, \ 564 .enable_event = snbep_uncore_msr_enable_event, \ 565 .read_counter = uncore_msr_read_counter 566 567 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 568 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \ 569 .init_box = snbep_uncore_msr_init_box \ 570 571 static struct intel_uncore_ops snbep_uncore_msr_ops = { 572 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 573 }; 574 575 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ 576 .init_box = snbep_uncore_pci_init_box, \ 577 .disable_box = snbep_uncore_pci_disable_box, \ 578 .enable_box = snbep_uncore_pci_enable_box, \ 579 .disable_event = snbep_uncore_pci_disable_event, \ 580 .read_counter = snbep_uncore_pci_read_counter 581 582 static struct intel_uncore_ops snbep_uncore_pci_ops = { 583 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), 584 .enable_event = snbep_uncore_pci_enable_event, \ 585 }; 586 587 static struct event_constraint snbep_uncore_cbox_constraints[] = { 588 UNCORE_EVENT_CONSTRAINT(0x01, 0x1), 589 UNCORE_EVENT_CONSTRAINT(0x02, 0x3), 590 UNCORE_EVENT_CONSTRAINT(0x04, 0x3), 591 UNCORE_EVENT_CONSTRAINT(0x05, 0x3), 592 UNCORE_EVENT_CONSTRAINT(0x07, 0x3), 593 UNCORE_EVENT_CONSTRAINT(0x09, 0x3), 594 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 595 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 596 UNCORE_EVENT_CONSTRAINT(0x13, 0x3), 597 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), 598 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), 599 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), 600 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), 601 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), 602 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 603 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 604 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 605 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 606 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 607 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 608 UNCORE_EVENT_CONSTRAINT(0x35, 0x3), 609 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 610 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 611 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 612 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 613 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), 614 EVENT_CONSTRAINT_END 615 }; 616 617 static struct event_constraint snbep_uncore_r2pcie_constraints[] = { 618 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 619 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 620 UNCORE_EVENT_CONSTRAINT(0x12, 0x1), 621 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 622 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 623 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 624 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 625 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 626 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 627 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 628 EVENT_CONSTRAINT_END 629 }; 630 631 static struct event_constraint snbep_uncore_r3qpi_constraints[] = { 632 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 633 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 634 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 635 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 636 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 637 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 638 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 639 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 640 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 641 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 642 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 643 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 644 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 645 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), 646 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), 647 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 648 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 649 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 650 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), 651 UNCORE_EVENT_CONSTRAINT(0x30, 0x3), 652 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 653 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 654 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 655 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 656 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 657 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 658 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 659 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 660 EVENT_CONSTRAINT_END 661 }; 662 663 static struct intel_uncore_type snbep_uncore_ubox = { 664 .name = "ubox", 665 .num_counters = 2, 666 .num_boxes = 1, 667 .perf_ctr_bits = 44, 668 .fixed_ctr_bits = 48, 669 .perf_ctr = SNBEP_U_MSR_PMON_CTR0, 670 .event_ctl = SNBEP_U_MSR_PMON_CTL0, 671 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 672 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, 673 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, 674 .ops = &snbep_uncore_msr_ops, 675 .format_group = &snbep_uncore_ubox_format_group, 676 }; 677 678 static struct extra_reg snbep_uncore_cbox_extra_regs[] = { 679 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 680 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 681 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 682 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), 683 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 684 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), 685 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 686 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), 687 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 688 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 689 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 690 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), 691 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), 692 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), 693 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), 694 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), 695 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), 696 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), 697 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), 698 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), 699 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), 700 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), 701 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), 702 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), 703 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2), 704 EVENT_EXTRA_END 705 }; 706 707 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) 708 { 709 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 710 struct intel_uncore_extra_reg *er = &box->shared_regs[0]; 711 int i; 712 713 if (uncore_box_is_fake(box)) 714 return; 715 716 for (i = 0; i < 5; i++) { 717 if (reg1->alloc & (0x1 << i)) 718 atomic_sub(1 << (i * 6), &er->ref); 719 } 720 reg1->alloc = 0; 721 } 722 723 static struct event_constraint * 724 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event, 725 u64 (*cbox_filter_mask)(int fields)) 726 { 727 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 728 struct intel_uncore_extra_reg *er = &box->shared_regs[0]; 729 int i, alloc = 0; 730 unsigned long flags; 731 u64 mask; 732 733 if (reg1->idx == EXTRA_REG_NONE) 734 return NULL; 735 736 raw_spin_lock_irqsave(&er->lock, flags); 737 for (i = 0; i < 5; i++) { 738 if (!(reg1->idx & (0x1 << i))) 739 continue; 740 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) 741 continue; 742 743 mask = cbox_filter_mask(0x1 << i); 744 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) || 745 !((reg1->config ^ er->config) & mask)) { 746 atomic_add(1 << (i * 6), &er->ref); 747 er->config &= ~mask; 748 er->config |= reg1->config & mask; 749 alloc |= (0x1 << i); 750 } else { 751 break; 752 } 753 } 754 raw_spin_unlock_irqrestore(&er->lock, flags); 755 if (i < 5) 756 goto fail; 757 758 if (!uncore_box_is_fake(box)) 759 reg1->alloc |= alloc; 760 761 return NULL; 762 fail: 763 for (; i >= 0; i--) { 764 if (alloc & (0x1 << i)) 765 atomic_sub(1 << (i * 6), &er->ref); 766 } 767 return &uncore_constraint_empty; 768 } 769 770 static u64 snbep_cbox_filter_mask(int fields) 771 { 772 u64 mask = 0; 773 774 if (fields & 0x1) 775 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID; 776 if (fields & 0x2) 777 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID; 778 if (fields & 0x4) 779 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE; 780 if (fields & 0x8) 781 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC; 782 783 return mask; 784 } 785 786 static struct event_constraint * 787 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 788 { 789 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask); 790 } 791 792 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) 793 { 794 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 795 struct extra_reg *er; 796 int idx = 0; 797 798 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) { 799 if (er->event != (event->hw.config & er->config_mask)) 800 continue; 801 idx |= er->idx; 802 } 803 804 if (idx) { 805 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + 806 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 807 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx); 808 reg1->idx = idx; 809 } 810 return 0; 811 } 812 813 static struct intel_uncore_ops snbep_uncore_cbox_ops = { 814 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 815 .hw_config = snbep_cbox_hw_config, 816 .get_constraint = snbep_cbox_get_constraint, 817 .put_constraint = snbep_cbox_put_constraint, 818 }; 819 820 static struct intel_uncore_type snbep_uncore_cbox = { 821 .name = "cbox", 822 .num_counters = 4, 823 .num_boxes = 8, 824 .perf_ctr_bits = 44, 825 .event_ctl = SNBEP_C0_MSR_PMON_CTL0, 826 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, 827 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, 828 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, 829 .msr_offset = SNBEP_CBO_MSR_OFFSET, 830 .num_shared_regs = 1, 831 .constraints = snbep_uncore_cbox_constraints, 832 .ops = &snbep_uncore_cbox_ops, 833 .format_group = &snbep_uncore_cbox_format_group, 834 }; 835 836 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify) 837 { 838 struct hw_perf_event *hwc = &event->hw; 839 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 840 u64 config = reg1->config; 841 842 if (new_idx > reg1->idx) 843 config <<= 8 * (new_idx - reg1->idx); 844 else 845 config >>= 8 * (reg1->idx - new_idx); 846 847 if (modify) { 848 hwc->config += new_idx - reg1->idx; 849 reg1->config = config; 850 reg1->idx = new_idx; 851 } 852 return config; 853 } 854 855 static struct event_constraint * 856 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 857 { 858 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 859 struct intel_uncore_extra_reg *er = &box->shared_regs[0]; 860 unsigned long flags; 861 int idx = reg1->idx; 862 u64 mask, config1 = reg1->config; 863 bool ok = false; 864 865 if (reg1->idx == EXTRA_REG_NONE || 866 (!uncore_box_is_fake(box) && reg1->alloc)) 867 return NULL; 868 again: 869 mask = 0xffULL << (idx * 8); 870 raw_spin_lock_irqsave(&er->lock, flags); 871 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) || 872 !((config1 ^ er->config) & mask)) { 873 atomic_add(1 << (idx * 8), &er->ref); 874 er->config &= ~mask; 875 er->config |= config1 & mask; 876 ok = true; 877 } 878 raw_spin_unlock_irqrestore(&er->lock, flags); 879 880 if (!ok) { 881 idx = (idx + 1) % 4; 882 if (idx != reg1->idx) { 883 config1 = snbep_pcu_alter_er(event, idx, false); 884 goto again; 885 } 886 return &uncore_constraint_empty; 887 } 888 889 if (!uncore_box_is_fake(box)) { 890 if (idx != reg1->idx) 891 snbep_pcu_alter_er(event, idx, true); 892 reg1->alloc = 1; 893 } 894 return NULL; 895 } 896 897 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event) 898 { 899 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 900 struct intel_uncore_extra_reg *er = &box->shared_regs[0]; 901 902 if (uncore_box_is_fake(box) || !reg1->alloc) 903 return; 904 905 atomic_sub(1 << (reg1->idx * 8), &er->ref); 906 reg1->alloc = 0; 907 } 908 909 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) 910 { 911 struct hw_perf_event *hwc = &event->hw; 912 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 913 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; 914 915 if (ev_sel >= 0xb && ev_sel <= 0xe) { 916 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; 917 reg1->idx = ev_sel - 0xb; 918 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8)); 919 } 920 return 0; 921 } 922 923 static struct intel_uncore_ops snbep_uncore_pcu_ops = { 924 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 925 .hw_config = snbep_pcu_hw_config, 926 .get_constraint = snbep_pcu_get_constraint, 927 .put_constraint = snbep_pcu_put_constraint, 928 }; 929 930 static struct intel_uncore_type snbep_uncore_pcu = { 931 .name = "pcu", 932 .num_counters = 4, 933 .num_boxes = 1, 934 .perf_ctr_bits = 48, 935 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, 936 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, 937 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 938 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, 939 .num_shared_regs = 1, 940 .ops = &snbep_uncore_pcu_ops, 941 .format_group = &snbep_uncore_pcu_format_group, 942 }; 943 944 static struct intel_uncore_type *snbep_msr_uncores[] = { 945 &snbep_uncore_ubox, 946 &snbep_uncore_cbox, 947 &snbep_uncore_pcu, 948 NULL, 949 }; 950 951 void snbep_uncore_cpu_init(void) 952 { 953 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 954 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 955 uncore_msr_uncores = snbep_msr_uncores; 956 } 957 958 enum { 959 SNBEP_PCI_QPI_PORT0_FILTER, 960 SNBEP_PCI_QPI_PORT1_FILTER, 961 HSWEP_PCI_PCU_3, 962 }; 963 964 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) 965 { 966 struct hw_perf_event *hwc = &event->hw; 967 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 968 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 969 970 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { 971 reg1->idx = 0; 972 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; 973 reg1->config = event->attr.config1; 974 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; 975 reg2->config = event->attr.config2; 976 } 977 return 0; 978 } 979 980 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) 981 { 982 struct pci_dev *pdev = box->pci_dev; 983 struct hw_perf_event *hwc = &event->hw; 984 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 985 struct hw_perf_event_extra *reg2 = &hwc->branch_reg; 986 987 if (reg1->idx != EXTRA_REG_NONE) { 988 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; 989 int pkg = topology_phys_to_logical_pkg(box->pci_phys_id); 990 struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx]; 991 992 if (filter_pdev) { 993 pci_write_config_dword(filter_pdev, reg1->reg, 994 (u32)reg1->config); 995 pci_write_config_dword(filter_pdev, reg1->reg + 4, 996 (u32)(reg1->config >> 32)); 997 pci_write_config_dword(filter_pdev, reg2->reg, 998 (u32)reg2->config); 999 pci_write_config_dword(filter_pdev, reg2->reg + 4, 1000 (u32)(reg2->config >> 32)); 1001 } 1002 } 1003 1004 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 1005 } 1006 1007 static struct intel_uncore_ops snbep_uncore_qpi_ops = { 1008 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), 1009 .enable_event = snbep_qpi_enable_event, 1010 .hw_config = snbep_qpi_hw_config, 1011 .get_constraint = uncore_get_constraint, 1012 .put_constraint = uncore_put_constraint, 1013 }; 1014 1015 #define SNBEP_UNCORE_PCI_COMMON_INIT() \ 1016 .perf_ctr = SNBEP_PCI_PMON_CTR0, \ 1017 .event_ctl = SNBEP_PCI_PMON_CTL0, \ 1018 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ 1019 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ 1020 .ops = &snbep_uncore_pci_ops, \ 1021 .format_group = &snbep_uncore_format_group 1022 1023 static struct intel_uncore_type snbep_uncore_ha = { 1024 .name = "ha", 1025 .num_counters = 4, 1026 .num_boxes = 1, 1027 .perf_ctr_bits = 48, 1028 SNBEP_UNCORE_PCI_COMMON_INIT(), 1029 }; 1030 1031 static struct intel_uncore_type snbep_uncore_imc = { 1032 .name = "imc", 1033 .num_counters = 4, 1034 .num_boxes = 4, 1035 .perf_ctr_bits = 48, 1036 .fixed_ctr_bits = 48, 1037 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 1038 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 1039 .event_descs = snbep_uncore_imc_events, 1040 SNBEP_UNCORE_PCI_COMMON_INIT(), 1041 }; 1042 1043 static struct intel_uncore_type snbep_uncore_qpi = { 1044 .name = "qpi", 1045 .num_counters = 4, 1046 .num_boxes = 2, 1047 .perf_ctr_bits = 48, 1048 .perf_ctr = SNBEP_PCI_PMON_CTR0, 1049 .event_ctl = SNBEP_PCI_PMON_CTL0, 1050 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 1051 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 1052 .num_shared_regs = 1, 1053 .ops = &snbep_uncore_qpi_ops, 1054 .event_descs = snbep_uncore_qpi_events, 1055 .format_group = &snbep_uncore_qpi_format_group, 1056 }; 1057 1058 1059 static struct intel_uncore_type snbep_uncore_r2pcie = { 1060 .name = "r2pcie", 1061 .num_counters = 4, 1062 .num_boxes = 1, 1063 .perf_ctr_bits = 44, 1064 .constraints = snbep_uncore_r2pcie_constraints, 1065 SNBEP_UNCORE_PCI_COMMON_INIT(), 1066 }; 1067 1068 static struct intel_uncore_type snbep_uncore_r3qpi = { 1069 .name = "r3qpi", 1070 .num_counters = 3, 1071 .num_boxes = 2, 1072 .perf_ctr_bits = 44, 1073 .constraints = snbep_uncore_r3qpi_constraints, 1074 SNBEP_UNCORE_PCI_COMMON_INIT(), 1075 }; 1076 1077 enum { 1078 SNBEP_PCI_UNCORE_HA, 1079 SNBEP_PCI_UNCORE_IMC, 1080 SNBEP_PCI_UNCORE_QPI, 1081 SNBEP_PCI_UNCORE_R2PCIE, 1082 SNBEP_PCI_UNCORE_R3QPI, 1083 }; 1084 1085 static struct intel_uncore_type *snbep_pci_uncores[] = { 1086 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha, 1087 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc, 1088 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi, 1089 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie, 1090 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi, 1091 NULL, 1092 }; 1093 1094 static const struct pci_device_id snbep_uncore_pci_ids[] = { 1095 { /* Home Agent */ 1096 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), 1097 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), 1098 }, 1099 { /* MC Channel 0 */ 1100 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), 1101 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), 1102 }, 1103 { /* MC Channel 1 */ 1104 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), 1105 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), 1106 }, 1107 { /* MC Channel 2 */ 1108 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), 1109 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), 1110 }, 1111 { /* MC Channel 3 */ 1112 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), 1113 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), 1114 }, 1115 { /* QPI Port 0 */ 1116 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), 1117 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), 1118 }, 1119 { /* QPI Port 1 */ 1120 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), 1121 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), 1122 }, 1123 { /* R2PCIe */ 1124 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), 1125 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), 1126 }, 1127 { /* R3QPI Link 0 */ 1128 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), 1129 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), 1130 }, 1131 { /* R3QPI Link 1 */ 1132 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), 1133 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), 1134 }, 1135 { /* QPI Port 0 filter */ 1136 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), 1137 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1138 SNBEP_PCI_QPI_PORT0_FILTER), 1139 }, 1140 { /* QPI Port 0 filter */ 1141 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), 1142 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1143 SNBEP_PCI_QPI_PORT1_FILTER), 1144 }, 1145 { /* end: all zeroes */ } 1146 }; 1147 1148 static struct pci_driver snbep_uncore_pci_driver = { 1149 .name = "snbep_uncore", 1150 .id_table = snbep_uncore_pci_ids, 1151 }; 1152 1153 /* 1154 * build pci bus to socket mapping 1155 */ 1156 static int snbep_pci2phy_map_init(int devid) 1157 { 1158 struct pci_dev *ubox_dev = NULL; 1159 int i, bus, nodeid, segment; 1160 struct pci2phy_map *map; 1161 int err = 0; 1162 u32 config = 0; 1163 1164 while (1) { 1165 /* find the UBOX device */ 1166 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev); 1167 if (!ubox_dev) 1168 break; 1169 bus = ubox_dev->bus->number; 1170 /* get the Node ID of the local register */ 1171 err = pci_read_config_dword(ubox_dev, 0x40, &config); 1172 if (err) 1173 break; 1174 nodeid = config; 1175 /* get the Node ID mapping */ 1176 err = pci_read_config_dword(ubox_dev, 0x54, &config); 1177 if (err) 1178 break; 1179 1180 segment = pci_domain_nr(ubox_dev->bus); 1181 raw_spin_lock(&pci2phy_map_lock); 1182 map = __find_pci2phy_map(segment); 1183 if (!map) { 1184 raw_spin_unlock(&pci2phy_map_lock); 1185 err = -ENOMEM; 1186 break; 1187 } 1188 1189 /* 1190 * every three bits in the Node ID mapping register maps 1191 * to a particular node. 1192 */ 1193 for (i = 0; i < 8; i++) { 1194 if (nodeid == ((config >> (3 * i)) & 0x7)) { 1195 map->pbus_to_physid[bus] = i; 1196 break; 1197 } 1198 } 1199 raw_spin_unlock(&pci2phy_map_lock); 1200 } 1201 1202 if (!err) { 1203 /* 1204 * For PCI bus with no UBOX device, find the next bus 1205 * that has UBOX device and use its mapping. 1206 */ 1207 raw_spin_lock(&pci2phy_map_lock); 1208 list_for_each_entry(map, &pci2phy_map_head, list) { 1209 i = -1; 1210 for (bus = 255; bus >= 0; bus--) { 1211 if (map->pbus_to_physid[bus] >= 0) 1212 i = map->pbus_to_physid[bus]; 1213 else 1214 map->pbus_to_physid[bus] = i; 1215 } 1216 } 1217 raw_spin_unlock(&pci2phy_map_lock); 1218 } 1219 1220 pci_dev_put(ubox_dev); 1221 1222 return err ? pcibios_err_to_errno(err) : 0; 1223 } 1224 1225 int snbep_uncore_pci_init(void) 1226 { 1227 int ret = snbep_pci2phy_map_init(0x3ce0); 1228 if (ret) 1229 return ret; 1230 uncore_pci_uncores = snbep_pci_uncores; 1231 uncore_pci_driver = &snbep_uncore_pci_driver; 1232 return 0; 1233 } 1234 /* end of Sandy Bridge-EP uncore support */ 1235 1236 /* IvyTown uncore support */ 1237 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box) 1238 { 1239 unsigned msr = uncore_msr_box_ctl(box); 1240 if (msr) 1241 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); 1242 } 1243 1244 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) 1245 { 1246 struct pci_dev *pdev = box->pci_dev; 1247 1248 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 1249 } 1250 1251 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 1252 .init_box = ivbep_uncore_msr_init_box, \ 1253 .disable_box = snbep_uncore_msr_disable_box, \ 1254 .enable_box = snbep_uncore_msr_enable_box, \ 1255 .disable_event = snbep_uncore_msr_disable_event, \ 1256 .enable_event = snbep_uncore_msr_enable_event, \ 1257 .read_counter = uncore_msr_read_counter 1258 1259 static struct intel_uncore_ops ivbep_uncore_msr_ops = { 1260 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 1261 }; 1262 1263 static struct intel_uncore_ops ivbep_uncore_pci_ops = { 1264 .init_box = ivbep_uncore_pci_init_box, 1265 .disable_box = snbep_uncore_pci_disable_box, 1266 .enable_box = snbep_uncore_pci_enable_box, 1267 .disable_event = snbep_uncore_pci_disable_event, 1268 .enable_event = snbep_uncore_pci_enable_event, 1269 .read_counter = snbep_uncore_pci_read_counter, 1270 }; 1271 1272 #define IVBEP_UNCORE_PCI_COMMON_INIT() \ 1273 .perf_ctr = SNBEP_PCI_PMON_CTR0, \ 1274 .event_ctl = SNBEP_PCI_PMON_CTL0, \ 1275 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \ 1276 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ 1277 .ops = &ivbep_uncore_pci_ops, \ 1278 .format_group = &ivbep_uncore_format_group 1279 1280 static struct attribute *ivbep_uncore_formats_attr[] = { 1281 &format_attr_event.attr, 1282 &format_attr_umask.attr, 1283 &format_attr_edge.attr, 1284 &format_attr_inv.attr, 1285 &format_attr_thresh8.attr, 1286 NULL, 1287 }; 1288 1289 static struct attribute *ivbep_uncore_ubox_formats_attr[] = { 1290 &format_attr_event.attr, 1291 &format_attr_umask.attr, 1292 &format_attr_edge.attr, 1293 &format_attr_inv.attr, 1294 &format_attr_thresh5.attr, 1295 NULL, 1296 }; 1297 1298 static struct attribute *ivbep_uncore_cbox_formats_attr[] = { 1299 &format_attr_event.attr, 1300 &format_attr_umask.attr, 1301 &format_attr_edge.attr, 1302 &format_attr_tid_en.attr, 1303 &format_attr_thresh8.attr, 1304 &format_attr_filter_tid.attr, 1305 &format_attr_filter_link.attr, 1306 &format_attr_filter_state2.attr, 1307 &format_attr_filter_nid2.attr, 1308 &format_attr_filter_opc2.attr, 1309 &format_attr_filter_nc.attr, 1310 &format_attr_filter_c6.attr, 1311 &format_attr_filter_isoc.attr, 1312 NULL, 1313 }; 1314 1315 static struct attribute *ivbep_uncore_pcu_formats_attr[] = { 1316 &format_attr_event_ext.attr, 1317 &format_attr_occ_sel.attr, 1318 &format_attr_edge.attr, 1319 &format_attr_thresh5.attr, 1320 &format_attr_occ_invert.attr, 1321 &format_attr_occ_edge.attr, 1322 &format_attr_filter_band0.attr, 1323 &format_attr_filter_band1.attr, 1324 &format_attr_filter_band2.attr, 1325 &format_attr_filter_band3.attr, 1326 NULL, 1327 }; 1328 1329 static struct attribute *ivbep_uncore_qpi_formats_attr[] = { 1330 &format_attr_event_ext.attr, 1331 &format_attr_umask.attr, 1332 &format_attr_edge.attr, 1333 &format_attr_thresh8.attr, 1334 &format_attr_match_rds.attr, 1335 &format_attr_match_rnid30.attr, 1336 &format_attr_match_rnid4.attr, 1337 &format_attr_match_dnid.attr, 1338 &format_attr_match_mc.attr, 1339 &format_attr_match_opc.attr, 1340 &format_attr_match_vnw.attr, 1341 &format_attr_match0.attr, 1342 &format_attr_match1.attr, 1343 &format_attr_mask_rds.attr, 1344 &format_attr_mask_rnid30.attr, 1345 &format_attr_mask_rnid4.attr, 1346 &format_attr_mask_dnid.attr, 1347 &format_attr_mask_mc.attr, 1348 &format_attr_mask_opc.attr, 1349 &format_attr_mask_vnw.attr, 1350 &format_attr_mask0.attr, 1351 &format_attr_mask1.attr, 1352 NULL, 1353 }; 1354 1355 static struct attribute_group ivbep_uncore_format_group = { 1356 .name = "format", 1357 .attrs = ivbep_uncore_formats_attr, 1358 }; 1359 1360 static struct attribute_group ivbep_uncore_ubox_format_group = { 1361 .name = "format", 1362 .attrs = ivbep_uncore_ubox_formats_attr, 1363 }; 1364 1365 static struct attribute_group ivbep_uncore_cbox_format_group = { 1366 .name = "format", 1367 .attrs = ivbep_uncore_cbox_formats_attr, 1368 }; 1369 1370 static struct attribute_group ivbep_uncore_pcu_format_group = { 1371 .name = "format", 1372 .attrs = ivbep_uncore_pcu_formats_attr, 1373 }; 1374 1375 static struct attribute_group ivbep_uncore_qpi_format_group = { 1376 .name = "format", 1377 .attrs = ivbep_uncore_qpi_formats_attr, 1378 }; 1379 1380 static struct intel_uncore_type ivbep_uncore_ubox = { 1381 .name = "ubox", 1382 .num_counters = 2, 1383 .num_boxes = 1, 1384 .perf_ctr_bits = 44, 1385 .fixed_ctr_bits = 48, 1386 .perf_ctr = SNBEP_U_MSR_PMON_CTR0, 1387 .event_ctl = SNBEP_U_MSR_PMON_CTL0, 1388 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK, 1389 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, 1390 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, 1391 .ops = &ivbep_uncore_msr_ops, 1392 .format_group = &ivbep_uncore_ubox_format_group, 1393 }; 1394 1395 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = { 1396 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1397 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1398 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1399 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1400 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1401 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1402 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1403 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), 1404 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1405 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), 1406 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1407 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), 1408 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), 1409 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), 1410 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), 1411 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), 1412 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), 1413 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), 1414 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), 1415 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), 1416 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), 1417 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), 1418 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), 1419 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 1420 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 1421 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 1422 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), 1423 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1424 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 1425 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 1426 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), 1427 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), 1428 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), 1429 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), 1430 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), 1431 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), 1432 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), 1433 EVENT_EXTRA_END 1434 }; 1435 1436 static u64 ivbep_cbox_filter_mask(int fields) 1437 { 1438 u64 mask = 0; 1439 1440 if (fields & 0x1) 1441 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID; 1442 if (fields & 0x2) 1443 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK; 1444 if (fields & 0x4) 1445 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE; 1446 if (fields & 0x8) 1447 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID; 1448 if (fields & 0x10) { 1449 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC; 1450 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC; 1451 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6; 1452 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC; 1453 } 1454 1455 return mask; 1456 } 1457 1458 static struct event_constraint * 1459 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 1460 { 1461 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask); 1462 } 1463 1464 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) 1465 { 1466 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 1467 struct extra_reg *er; 1468 int idx = 0; 1469 1470 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) { 1471 if (er->event != (event->hw.config & er->config_mask)) 1472 continue; 1473 idx |= er->idx; 1474 } 1475 1476 if (idx) { 1477 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + 1478 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 1479 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx); 1480 reg1->idx = idx; 1481 } 1482 return 0; 1483 } 1484 1485 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event) 1486 { 1487 struct hw_perf_event *hwc = &event->hw; 1488 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1489 1490 if (reg1->idx != EXTRA_REG_NONE) { 1491 u64 filter = uncore_shared_reg_config(box, 0); 1492 wrmsrl(reg1->reg, filter & 0xffffffff); 1493 wrmsrl(reg1->reg + 6, filter >> 32); 1494 } 1495 1496 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 1497 } 1498 1499 static struct intel_uncore_ops ivbep_uncore_cbox_ops = { 1500 .init_box = ivbep_uncore_msr_init_box, 1501 .disable_box = snbep_uncore_msr_disable_box, 1502 .enable_box = snbep_uncore_msr_enable_box, 1503 .disable_event = snbep_uncore_msr_disable_event, 1504 .enable_event = ivbep_cbox_enable_event, 1505 .read_counter = uncore_msr_read_counter, 1506 .hw_config = ivbep_cbox_hw_config, 1507 .get_constraint = ivbep_cbox_get_constraint, 1508 .put_constraint = snbep_cbox_put_constraint, 1509 }; 1510 1511 static struct intel_uncore_type ivbep_uncore_cbox = { 1512 .name = "cbox", 1513 .num_counters = 4, 1514 .num_boxes = 15, 1515 .perf_ctr_bits = 44, 1516 .event_ctl = SNBEP_C0_MSR_PMON_CTL0, 1517 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, 1518 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK, 1519 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, 1520 .msr_offset = SNBEP_CBO_MSR_OFFSET, 1521 .num_shared_regs = 1, 1522 .constraints = snbep_uncore_cbox_constraints, 1523 .ops = &ivbep_uncore_cbox_ops, 1524 .format_group = &ivbep_uncore_cbox_format_group, 1525 }; 1526 1527 static struct intel_uncore_ops ivbep_uncore_pcu_ops = { 1528 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 1529 .hw_config = snbep_pcu_hw_config, 1530 .get_constraint = snbep_pcu_get_constraint, 1531 .put_constraint = snbep_pcu_put_constraint, 1532 }; 1533 1534 static struct intel_uncore_type ivbep_uncore_pcu = { 1535 .name = "pcu", 1536 .num_counters = 4, 1537 .num_boxes = 1, 1538 .perf_ctr_bits = 48, 1539 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, 1540 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, 1541 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 1542 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, 1543 .num_shared_regs = 1, 1544 .ops = &ivbep_uncore_pcu_ops, 1545 .format_group = &ivbep_uncore_pcu_format_group, 1546 }; 1547 1548 static struct intel_uncore_type *ivbep_msr_uncores[] = { 1549 &ivbep_uncore_ubox, 1550 &ivbep_uncore_cbox, 1551 &ivbep_uncore_pcu, 1552 NULL, 1553 }; 1554 1555 void ivbep_uncore_cpu_init(void) 1556 { 1557 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 1558 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 1559 uncore_msr_uncores = ivbep_msr_uncores; 1560 } 1561 1562 static struct intel_uncore_type ivbep_uncore_ha = { 1563 .name = "ha", 1564 .num_counters = 4, 1565 .num_boxes = 2, 1566 .perf_ctr_bits = 48, 1567 IVBEP_UNCORE_PCI_COMMON_INIT(), 1568 }; 1569 1570 static struct intel_uncore_type ivbep_uncore_imc = { 1571 .name = "imc", 1572 .num_counters = 4, 1573 .num_boxes = 8, 1574 .perf_ctr_bits = 48, 1575 .fixed_ctr_bits = 48, 1576 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 1577 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 1578 .event_descs = snbep_uncore_imc_events, 1579 IVBEP_UNCORE_PCI_COMMON_INIT(), 1580 }; 1581 1582 /* registers in IRP boxes are not properly aligned */ 1583 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; 1584 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; 1585 1586 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) 1587 { 1588 struct pci_dev *pdev = box->pci_dev; 1589 struct hw_perf_event *hwc = &event->hw; 1590 1591 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], 1592 hwc->config | SNBEP_PMON_CTL_EN); 1593 } 1594 1595 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) 1596 { 1597 struct pci_dev *pdev = box->pci_dev; 1598 struct hw_perf_event *hwc = &event->hw; 1599 1600 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config); 1601 } 1602 1603 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) 1604 { 1605 struct pci_dev *pdev = box->pci_dev; 1606 struct hw_perf_event *hwc = &event->hw; 1607 u64 count = 0; 1608 1609 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); 1610 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); 1611 1612 return count; 1613 } 1614 1615 static struct intel_uncore_ops ivbep_uncore_irp_ops = { 1616 .init_box = ivbep_uncore_pci_init_box, 1617 .disable_box = snbep_uncore_pci_disable_box, 1618 .enable_box = snbep_uncore_pci_enable_box, 1619 .disable_event = ivbep_uncore_irp_disable_event, 1620 .enable_event = ivbep_uncore_irp_enable_event, 1621 .read_counter = ivbep_uncore_irp_read_counter, 1622 }; 1623 1624 static struct intel_uncore_type ivbep_uncore_irp = { 1625 .name = "irp", 1626 .num_counters = 4, 1627 .num_boxes = 1, 1628 .perf_ctr_bits = 48, 1629 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, 1630 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 1631 .ops = &ivbep_uncore_irp_ops, 1632 .format_group = &ivbep_uncore_format_group, 1633 }; 1634 1635 static struct intel_uncore_ops ivbep_uncore_qpi_ops = { 1636 .init_box = ivbep_uncore_pci_init_box, 1637 .disable_box = snbep_uncore_pci_disable_box, 1638 .enable_box = snbep_uncore_pci_enable_box, 1639 .disable_event = snbep_uncore_pci_disable_event, 1640 .enable_event = snbep_qpi_enable_event, 1641 .read_counter = snbep_uncore_pci_read_counter, 1642 .hw_config = snbep_qpi_hw_config, 1643 .get_constraint = uncore_get_constraint, 1644 .put_constraint = uncore_put_constraint, 1645 }; 1646 1647 static struct intel_uncore_type ivbep_uncore_qpi = { 1648 .name = "qpi", 1649 .num_counters = 4, 1650 .num_boxes = 3, 1651 .perf_ctr_bits = 48, 1652 .perf_ctr = SNBEP_PCI_PMON_CTR0, 1653 .event_ctl = SNBEP_PCI_PMON_CTL0, 1654 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 1655 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 1656 .num_shared_regs = 1, 1657 .ops = &ivbep_uncore_qpi_ops, 1658 .format_group = &ivbep_uncore_qpi_format_group, 1659 }; 1660 1661 static struct intel_uncore_type ivbep_uncore_r2pcie = { 1662 .name = "r2pcie", 1663 .num_counters = 4, 1664 .num_boxes = 1, 1665 .perf_ctr_bits = 44, 1666 .constraints = snbep_uncore_r2pcie_constraints, 1667 IVBEP_UNCORE_PCI_COMMON_INIT(), 1668 }; 1669 1670 static struct intel_uncore_type ivbep_uncore_r3qpi = { 1671 .name = "r3qpi", 1672 .num_counters = 3, 1673 .num_boxes = 2, 1674 .perf_ctr_bits = 44, 1675 .constraints = snbep_uncore_r3qpi_constraints, 1676 IVBEP_UNCORE_PCI_COMMON_INIT(), 1677 }; 1678 1679 enum { 1680 IVBEP_PCI_UNCORE_HA, 1681 IVBEP_PCI_UNCORE_IMC, 1682 IVBEP_PCI_UNCORE_IRP, 1683 IVBEP_PCI_UNCORE_QPI, 1684 IVBEP_PCI_UNCORE_R2PCIE, 1685 IVBEP_PCI_UNCORE_R3QPI, 1686 }; 1687 1688 static struct intel_uncore_type *ivbep_pci_uncores[] = { 1689 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha, 1690 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc, 1691 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp, 1692 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi, 1693 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie, 1694 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi, 1695 NULL, 1696 }; 1697 1698 static const struct pci_device_id ivbep_uncore_pci_ids[] = { 1699 { /* Home Agent 0 */ 1700 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), 1701 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0), 1702 }, 1703 { /* Home Agent 1 */ 1704 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), 1705 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1), 1706 }, 1707 { /* MC0 Channel 0 */ 1708 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), 1709 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0), 1710 }, 1711 { /* MC0 Channel 1 */ 1712 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), 1713 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1), 1714 }, 1715 { /* MC0 Channel 3 */ 1716 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), 1717 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2), 1718 }, 1719 { /* MC0 Channel 4 */ 1720 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), 1721 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3), 1722 }, 1723 { /* MC1 Channel 0 */ 1724 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), 1725 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4), 1726 }, 1727 { /* MC1 Channel 1 */ 1728 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), 1729 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5), 1730 }, 1731 { /* MC1 Channel 3 */ 1732 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), 1733 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6), 1734 }, 1735 { /* MC1 Channel 4 */ 1736 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), 1737 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7), 1738 }, 1739 { /* IRP */ 1740 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39), 1741 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0), 1742 }, 1743 { /* QPI0 Port 0 */ 1744 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), 1745 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0), 1746 }, 1747 { /* QPI0 Port 1 */ 1748 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), 1749 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1), 1750 }, 1751 { /* QPI1 Port 2 */ 1752 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), 1753 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2), 1754 }, 1755 { /* R2PCIe */ 1756 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), 1757 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0), 1758 }, 1759 { /* R3QPI0 Link 0 */ 1760 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), 1761 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0), 1762 }, 1763 { /* R3QPI0 Link 1 */ 1764 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), 1765 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1), 1766 }, 1767 { /* R3QPI1 Link 2 */ 1768 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), 1769 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2), 1770 }, 1771 { /* QPI Port 0 filter */ 1772 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86), 1773 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1774 SNBEP_PCI_QPI_PORT0_FILTER), 1775 }, 1776 { /* QPI Port 0 filter */ 1777 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96), 1778 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1779 SNBEP_PCI_QPI_PORT1_FILTER), 1780 }, 1781 { /* end: all zeroes */ } 1782 }; 1783 1784 static struct pci_driver ivbep_uncore_pci_driver = { 1785 .name = "ivbep_uncore", 1786 .id_table = ivbep_uncore_pci_ids, 1787 }; 1788 1789 int ivbep_uncore_pci_init(void) 1790 { 1791 int ret = snbep_pci2phy_map_init(0x0e1e); 1792 if (ret) 1793 return ret; 1794 uncore_pci_uncores = ivbep_pci_uncores; 1795 uncore_pci_driver = &ivbep_uncore_pci_driver; 1796 return 0; 1797 } 1798 /* end of IvyTown uncore support */ 1799 1800 /* KNL uncore support */ 1801 static struct attribute *knl_uncore_ubox_formats_attr[] = { 1802 &format_attr_event.attr, 1803 &format_attr_umask.attr, 1804 &format_attr_edge.attr, 1805 &format_attr_tid_en.attr, 1806 &format_attr_inv.attr, 1807 &format_attr_thresh5.attr, 1808 NULL, 1809 }; 1810 1811 static struct attribute_group knl_uncore_ubox_format_group = { 1812 .name = "format", 1813 .attrs = knl_uncore_ubox_formats_attr, 1814 }; 1815 1816 static struct intel_uncore_type knl_uncore_ubox = { 1817 .name = "ubox", 1818 .num_counters = 2, 1819 .num_boxes = 1, 1820 .perf_ctr_bits = 48, 1821 .fixed_ctr_bits = 48, 1822 .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 1823 .event_ctl = HSWEP_U_MSR_PMON_CTL0, 1824 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK, 1825 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 1826 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 1827 .ops = &snbep_uncore_msr_ops, 1828 .format_group = &knl_uncore_ubox_format_group, 1829 }; 1830 1831 static struct attribute *knl_uncore_cha_formats_attr[] = { 1832 &format_attr_event.attr, 1833 &format_attr_umask.attr, 1834 &format_attr_qor.attr, 1835 &format_attr_edge.attr, 1836 &format_attr_tid_en.attr, 1837 &format_attr_inv.attr, 1838 &format_attr_thresh8.attr, 1839 &format_attr_filter_tid4.attr, 1840 &format_attr_filter_link3.attr, 1841 &format_attr_filter_state4.attr, 1842 &format_attr_filter_local.attr, 1843 &format_attr_filter_all_op.attr, 1844 &format_attr_filter_nnm.attr, 1845 &format_attr_filter_opc3.attr, 1846 &format_attr_filter_nc.attr, 1847 &format_attr_filter_isoc.attr, 1848 NULL, 1849 }; 1850 1851 static struct attribute_group knl_uncore_cha_format_group = { 1852 .name = "format", 1853 .attrs = knl_uncore_cha_formats_attr, 1854 }; 1855 1856 static struct event_constraint knl_uncore_cha_constraints[] = { 1857 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 1858 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1), 1859 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 1860 EVENT_CONSTRAINT_END 1861 }; 1862 1863 static struct extra_reg knl_uncore_cha_extra_regs[] = { 1864 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1865 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1866 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2), 1867 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4), 1868 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4), 1869 EVENT_EXTRA_END 1870 }; 1871 1872 static u64 knl_cha_filter_mask(int fields) 1873 { 1874 u64 mask = 0; 1875 1876 if (fields & 0x1) 1877 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID; 1878 if (fields & 0x2) 1879 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE; 1880 if (fields & 0x4) 1881 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP; 1882 return mask; 1883 } 1884 1885 static struct event_constraint * 1886 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 1887 { 1888 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask); 1889 } 1890 1891 static int knl_cha_hw_config(struct intel_uncore_box *box, 1892 struct perf_event *event) 1893 { 1894 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 1895 struct extra_reg *er; 1896 int idx = 0; 1897 1898 for (er = knl_uncore_cha_extra_regs; er->msr; er++) { 1899 if (er->event != (event->hw.config & er->config_mask)) 1900 continue; 1901 idx |= er->idx; 1902 } 1903 1904 if (idx) { 1905 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + 1906 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx; 1907 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx); 1908 reg1->idx = idx; 1909 } 1910 return 0; 1911 } 1912 1913 static void hswep_cbox_enable_event(struct intel_uncore_box *box, 1914 struct perf_event *event); 1915 1916 static struct intel_uncore_ops knl_uncore_cha_ops = { 1917 .init_box = snbep_uncore_msr_init_box, 1918 .disable_box = snbep_uncore_msr_disable_box, 1919 .enable_box = snbep_uncore_msr_enable_box, 1920 .disable_event = snbep_uncore_msr_disable_event, 1921 .enable_event = hswep_cbox_enable_event, 1922 .read_counter = uncore_msr_read_counter, 1923 .hw_config = knl_cha_hw_config, 1924 .get_constraint = knl_cha_get_constraint, 1925 .put_constraint = snbep_cbox_put_constraint, 1926 }; 1927 1928 static struct intel_uncore_type knl_uncore_cha = { 1929 .name = "cha", 1930 .num_counters = 4, 1931 .num_boxes = 38, 1932 .perf_ctr_bits = 48, 1933 .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 1934 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 1935 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK, 1936 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 1937 .msr_offset = KNL_CHA_MSR_OFFSET, 1938 .num_shared_regs = 1, 1939 .constraints = knl_uncore_cha_constraints, 1940 .ops = &knl_uncore_cha_ops, 1941 .format_group = &knl_uncore_cha_format_group, 1942 }; 1943 1944 static struct attribute *knl_uncore_pcu_formats_attr[] = { 1945 &format_attr_event2.attr, 1946 &format_attr_use_occ_ctr.attr, 1947 &format_attr_occ_sel.attr, 1948 &format_attr_edge.attr, 1949 &format_attr_tid_en.attr, 1950 &format_attr_inv.attr, 1951 &format_attr_thresh6.attr, 1952 &format_attr_occ_invert.attr, 1953 &format_attr_occ_edge_det.attr, 1954 NULL, 1955 }; 1956 1957 static struct attribute_group knl_uncore_pcu_format_group = { 1958 .name = "format", 1959 .attrs = knl_uncore_pcu_formats_attr, 1960 }; 1961 1962 static struct intel_uncore_type knl_uncore_pcu = { 1963 .name = "pcu", 1964 .num_counters = 4, 1965 .num_boxes = 1, 1966 .perf_ctr_bits = 48, 1967 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, 1968 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, 1969 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK, 1970 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 1971 .ops = &snbep_uncore_msr_ops, 1972 .format_group = &knl_uncore_pcu_format_group, 1973 }; 1974 1975 static struct intel_uncore_type *knl_msr_uncores[] = { 1976 &knl_uncore_ubox, 1977 &knl_uncore_cha, 1978 &knl_uncore_pcu, 1979 NULL, 1980 }; 1981 1982 void knl_uncore_cpu_init(void) 1983 { 1984 uncore_msr_uncores = knl_msr_uncores; 1985 } 1986 1987 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box) 1988 { 1989 struct pci_dev *pdev = box->pci_dev; 1990 int box_ctl = uncore_pci_box_ctl(box); 1991 1992 pci_write_config_dword(pdev, box_ctl, 0); 1993 } 1994 1995 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box, 1996 struct perf_event *event) 1997 { 1998 struct pci_dev *pdev = box->pci_dev; 1999 struct hw_perf_event *hwc = &event->hw; 2000 2001 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK) 2002 == UNCORE_FIXED_EVENT) 2003 pci_write_config_dword(pdev, hwc->config_base, 2004 hwc->config | KNL_PMON_FIXED_CTL_EN); 2005 else 2006 pci_write_config_dword(pdev, hwc->config_base, 2007 hwc->config | SNBEP_PMON_CTL_EN); 2008 } 2009 2010 static struct intel_uncore_ops knl_uncore_imc_ops = { 2011 .init_box = snbep_uncore_pci_init_box, 2012 .disable_box = snbep_uncore_pci_disable_box, 2013 .enable_box = knl_uncore_imc_enable_box, 2014 .read_counter = snbep_uncore_pci_read_counter, 2015 .enable_event = knl_uncore_imc_enable_event, 2016 .disable_event = snbep_uncore_pci_disable_event, 2017 }; 2018 2019 static struct intel_uncore_type knl_uncore_imc_uclk = { 2020 .name = "imc_uclk", 2021 .num_counters = 4, 2022 .num_boxes = 2, 2023 .perf_ctr_bits = 48, 2024 .fixed_ctr_bits = 48, 2025 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW, 2026 .event_ctl = KNL_UCLK_MSR_PMON_CTL0, 2027 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2028 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW, 2029 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL, 2030 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL, 2031 .ops = &knl_uncore_imc_ops, 2032 .format_group = &snbep_uncore_format_group, 2033 }; 2034 2035 static struct intel_uncore_type knl_uncore_imc_dclk = { 2036 .name = "imc", 2037 .num_counters = 4, 2038 .num_boxes = 6, 2039 .perf_ctr_bits = 48, 2040 .fixed_ctr_bits = 48, 2041 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW, 2042 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0, 2043 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2044 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW, 2045 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL, 2046 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL, 2047 .ops = &knl_uncore_imc_ops, 2048 .format_group = &snbep_uncore_format_group, 2049 }; 2050 2051 static struct intel_uncore_type knl_uncore_edc_uclk = { 2052 .name = "edc_uclk", 2053 .num_counters = 4, 2054 .num_boxes = 8, 2055 .perf_ctr_bits = 48, 2056 .fixed_ctr_bits = 48, 2057 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW, 2058 .event_ctl = KNL_UCLK_MSR_PMON_CTL0, 2059 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2060 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW, 2061 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL, 2062 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL, 2063 .ops = &knl_uncore_imc_ops, 2064 .format_group = &snbep_uncore_format_group, 2065 }; 2066 2067 static struct intel_uncore_type knl_uncore_edc_eclk = { 2068 .name = "edc_eclk", 2069 .num_counters = 4, 2070 .num_boxes = 8, 2071 .perf_ctr_bits = 48, 2072 .fixed_ctr_bits = 48, 2073 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW, 2074 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0, 2075 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2076 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW, 2077 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL, 2078 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL, 2079 .ops = &knl_uncore_imc_ops, 2080 .format_group = &snbep_uncore_format_group, 2081 }; 2082 2083 static struct event_constraint knl_uncore_m2pcie_constraints[] = { 2084 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 2085 EVENT_CONSTRAINT_END 2086 }; 2087 2088 static struct intel_uncore_type knl_uncore_m2pcie = { 2089 .name = "m2pcie", 2090 .num_counters = 4, 2091 .num_boxes = 1, 2092 .perf_ctr_bits = 48, 2093 .constraints = knl_uncore_m2pcie_constraints, 2094 SNBEP_UNCORE_PCI_COMMON_INIT(), 2095 }; 2096 2097 static struct attribute *knl_uncore_irp_formats_attr[] = { 2098 &format_attr_event.attr, 2099 &format_attr_umask.attr, 2100 &format_attr_qor.attr, 2101 &format_attr_edge.attr, 2102 &format_attr_inv.attr, 2103 &format_attr_thresh8.attr, 2104 NULL, 2105 }; 2106 2107 static struct attribute_group knl_uncore_irp_format_group = { 2108 .name = "format", 2109 .attrs = knl_uncore_irp_formats_attr, 2110 }; 2111 2112 static struct intel_uncore_type knl_uncore_irp = { 2113 .name = "irp", 2114 .num_counters = 2, 2115 .num_boxes = 1, 2116 .perf_ctr_bits = 48, 2117 .perf_ctr = SNBEP_PCI_PMON_CTR0, 2118 .event_ctl = SNBEP_PCI_PMON_CTL0, 2119 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK, 2120 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL, 2121 .ops = &snbep_uncore_pci_ops, 2122 .format_group = &knl_uncore_irp_format_group, 2123 }; 2124 2125 enum { 2126 KNL_PCI_UNCORE_MC_UCLK, 2127 KNL_PCI_UNCORE_MC_DCLK, 2128 KNL_PCI_UNCORE_EDC_UCLK, 2129 KNL_PCI_UNCORE_EDC_ECLK, 2130 KNL_PCI_UNCORE_M2PCIE, 2131 KNL_PCI_UNCORE_IRP, 2132 }; 2133 2134 static struct intel_uncore_type *knl_pci_uncores[] = { 2135 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk, 2136 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk, 2137 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk, 2138 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk, 2139 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie, 2140 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp, 2141 NULL, 2142 }; 2143 2144 /* 2145 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU 2146 * device type. prior to KNL, each instance of a PMU device type had a unique 2147 * device ID. 2148 * 2149 * PCI Device ID Uncore PMU Devices 2150 * ---------------------------------- 2151 * 0x7841 MC0 UClk, MC1 UClk 2152 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2, 2153 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2 2154 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk, 2155 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk 2156 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk, 2157 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk 2158 * 0x7817 M2PCIe 2159 * 0x7814 IRP 2160 */ 2161 2162 static const struct pci_device_id knl_uncore_pci_ids[] = { 2163 { /* MC UClk */ 2164 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841), 2165 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0), 2166 }, 2167 { /* MC DClk Channel */ 2168 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843), 2169 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0), 2170 }, 2171 { /* EDC UClk */ 2172 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833), 2173 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0), 2174 }, 2175 { /* EDC EClk */ 2176 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835), 2177 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0), 2178 }, 2179 { /* M2PCIe */ 2180 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817), 2181 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0), 2182 }, 2183 { /* IRP */ 2184 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814), 2185 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0), 2186 }, 2187 { /* end: all zeroes */ } 2188 }; 2189 2190 static struct pci_driver knl_uncore_pci_driver = { 2191 .name = "knl_uncore", 2192 .id_table = knl_uncore_pci_ids, 2193 }; 2194 2195 int knl_uncore_pci_init(void) 2196 { 2197 int ret; 2198 2199 /* All KNL PCI based PMON units are on the same PCI bus except IRP */ 2200 ret = snb_pci2phy_map_init(0x7814); /* IRP */ 2201 if (ret) 2202 return ret; 2203 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */ 2204 if (ret) 2205 return ret; 2206 uncore_pci_uncores = knl_pci_uncores; 2207 uncore_pci_driver = &knl_uncore_pci_driver; 2208 return 0; 2209 } 2210 2211 /* end of KNL uncore support */ 2212 2213 /* Haswell-EP uncore support */ 2214 static struct attribute *hswep_uncore_ubox_formats_attr[] = { 2215 &format_attr_event.attr, 2216 &format_attr_umask.attr, 2217 &format_attr_edge.attr, 2218 &format_attr_inv.attr, 2219 &format_attr_thresh5.attr, 2220 &format_attr_filter_tid2.attr, 2221 &format_attr_filter_cid.attr, 2222 NULL, 2223 }; 2224 2225 static struct attribute_group hswep_uncore_ubox_format_group = { 2226 .name = "format", 2227 .attrs = hswep_uncore_ubox_formats_attr, 2228 }; 2229 2230 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event) 2231 { 2232 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 2233 reg1->reg = HSWEP_U_MSR_PMON_FILTER; 2234 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK; 2235 reg1->idx = 0; 2236 return 0; 2237 } 2238 2239 static struct intel_uncore_ops hswep_uncore_ubox_ops = { 2240 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 2241 .hw_config = hswep_ubox_hw_config, 2242 .get_constraint = uncore_get_constraint, 2243 .put_constraint = uncore_put_constraint, 2244 }; 2245 2246 static struct intel_uncore_type hswep_uncore_ubox = { 2247 .name = "ubox", 2248 .num_counters = 2, 2249 .num_boxes = 1, 2250 .perf_ctr_bits = 44, 2251 .fixed_ctr_bits = 48, 2252 .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 2253 .event_ctl = HSWEP_U_MSR_PMON_CTL0, 2254 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 2255 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 2256 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 2257 .num_shared_regs = 1, 2258 .ops = &hswep_uncore_ubox_ops, 2259 .format_group = &hswep_uncore_ubox_format_group, 2260 }; 2261 2262 static struct attribute *hswep_uncore_cbox_formats_attr[] = { 2263 &format_attr_event.attr, 2264 &format_attr_umask.attr, 2265 &format_attr_edge.attr, 2266 &format_attr_tid_en.attr, 2267 &format_attr_thresh8.attr, 2268 &format_attr_filter_tid3.attr, 2269 &format_attr_filter_link2.attr, 2270 &format_attr_filter_state3.attr, 2271 &format_attr_filter_nid2.attr, 2272 &format_attr_filter_opc2.attr, 2273 &format_attr_filter_nc.attr, 2274 &format_attr_filter_c6.attr, 2275 &format_attr_filter_isoc.attr, 2276 NULL, 2277 }; 2278 2279 static struct attribute_group hswep_uncore_cbox_format_group = { 2280 .name = "format", 2281 .attrs = hswep_uncore_cbox_formats_attr, 2282 }; 2283 2284 static struct event_constraint hswep_uncore_cbox_constraints[] = { 2285 UNCORE_EVENT_CONSTRAINT(0x01, 0x1), 2286 UNCORE_EVENT_CONSTRAINT(0x09, 0x1), 2287 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 2288 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 2289 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 2290 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), 2291 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), 2292 EVENT_CONSTRAINT_END 2293 }; 2294 2295 static struct extra_reg hswep_uncore_cbox_extra_regs[] = { 2296 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 2297 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 2298 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 2299 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 2300 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 2301 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 2302 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), 2303 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4), 2304 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8), 2305 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8), 2306 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8), 2307 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8), 2308 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8), 2309 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8), 2310 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12), 2311 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), 2312 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18), 2313 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8), 2314 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8), 2315 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8), 2316 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18), 2317 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8), 2318 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10), 2319 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 2320 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), 2321 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10), 2322 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 2323 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 2324 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 2325 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8), 2326 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8), 2327 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 2328 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8), 2329 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 2330 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10), 2331 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), 2332 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10), 2333 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8), 2334 EVENT_EXTRA_END 2335 }; 2336 2337 static u64 hswep_cbox_filter_mask(int fields) 2338 { 2339 u64 mask = 0; 2340 if (fields & 0x1) 2341 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID; 2342 if (fields & 0x2) 2343 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK; 2344 if (fields & 0x4) 2345 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE; 2346 if (fields & 0x8) 2347 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID; 2348 if (fields & 0x10) { 2349 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC; 2350 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC; 2351 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6; 2352 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC; 2353 } 2354 return mask; 2355 } 2356 2357 static struct event_constraint * 2358 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 2359 { 2360 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask); 2361 } 2362 2363 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) 2364 { 2365 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 2366 struct extra_reg *er; 2367 int idx = 0; 2368 2369 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) { 2370 if (er->event != (event->hw.config & er->config_mask)) 2371 continue; 2372 idx |= er->idx; 2373 } 2374 2375 if (idx) { 2376 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + 2377 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 2378 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx); 2379 reg1->idx = idx; 2380 } 2381 return 0; 2382 } 2383 2384 static void hswep_cbox_enable_event(struct intel_uncore_box *box, 2385 struct perf_event *event) 2386 { 2387 struct hw_perf_event *hwc = &event->hw; 2388 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 2389 2390 if (reg1->idx != EXTRA_REG_NONE) { 2391 u64 filter = uncore_shared_reg_config(box, 0); 2392 wrmsrl(reg1->reg, filter & 0xffffffff); 2393 wrmsrl(reg1->reg + 1, filter >> 32); 2394 } 2395 2396 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 2397 } 2398 2399 static struct intel_uncore_ops hswep_uncore_cbox_ops = { 2400 .init_box = snbep_uncore_msr_init_box, 2401 .disable_box = snbep_uncore_msr_disable_box, 2402 .enable_box = snbep_uncore_msr_enable_box, 2403 .disable_event = snbep_uncore_msr_disable_event, 2404 .enable_event = hswep_cbox_enable_event, 2405 .read_counter = uncore_msr_read_counter, 2406 .hw_config = hswep_cbox_hw_config, 2407 .get_constraint = hswep_cbox_get_constraint, 2408 .put_constraint = snbep_cbox_put_constraint, 2409 }; 2410 2411 static struct intel_uncore_type hswep_uncore_cbox = { 2412 .name = "cbox", 2413 .num_counters = 4, 2414 .num_boxes = 18, 2415 .perf_ctr_bits = 48, 2416 .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 2417 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 2418 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, 2419 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 2420 .msr_offset = HSWEP_CBO_MSR_OFFSET, 2421 .num_shared_regs = 1, 2422 .constraints = hswep_uncore_cbox_constraints, 2423 .ops = &hswep_uncore_cbox_ops, 2424 .format_group = &hswep_uncore_cbox_format_group, 2425 }; 2426 2427 /* 2428 * Write SBOX Initialization register bit by bit to avoid spurious #GPs 2429 */ 2430 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) 2431 { 2432 unsigned msr = uncore_msr_box_ctl(box); 2433 2434 if (msr) { 2435 u64 init = SNBEP_PMON_BOX_CTL_INT; 2436 u64 flags = 0; 2437 int i; 2438 2439 for_each_set_bit(i, (unsigned long *)&init, 64) { 2440 flags |= (1ULL << i); 2441 wrmsrl(msr, flags); 2442 } 2443 } 2444 } 2445 2446 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = { 2447 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 2448 .init_box = hswep_uncore_sbox_msr_init_box 2449 }; 2450 2451 static struct attribute *hswep_uncore_sbox_formats_attr[] = { 2452 &format_attr_event.attr, 2453 &format_attr_umask.attr, 2454 &format_attr_edge.attr, 2455 &format_attr_tid_en.attr, 2456 &format_attr_inv.attr, 2457 &format_attr_thresh8.attr, 2458 NULL, 2459 }; 2460 2461 static struct attribute_group hswep_uncore_sbox_format_group = { 2462 .name = "format", 2463 .attrs = hswep_uncore_sbox_formats_attr, 2464 }; 2465 2466 static struct intel_uncore_type hswep_uncore_sbox = { 2467 .name = "sbox", 2468 .num_counters = 4, 2469 .num_boxes = 4, 2470 .perf_ctr_bits = 44, 2471 .event_ctl = HSWEP_S0_MSR_PMON_CTL0, 2472 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, 2473 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 2474 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, 2475 .msr_offset = HSWEP_SBOX_MSR_OFFSET, 2476 .ops = &hswep_uncore_sbox_msr_ops, 2477 .format_group = &hswep_uncore_sbox_format_group, 2478 }; 2479 2480 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) 2481 { 2482 struct hw_perf_event *hwc = &event->hw; 2483 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 2484 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; 2485 2486 if (ev_sel >= 0xb && ev_sel <= 0xe) { 2487 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER; 2488 reg1->idx = ev_sel - 0xb; 2489 reg1->config = event->attr.config1 & (0xff << reg1->idx); 2490 } 2491 return 0; 2492 } 2493 2494 static struct intel_uncore_ops hswep_uncore_pcu_ops = { 2495 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 2496 .hw_config = hswep_pcu_hw_config, 2497 .get_constraint = snbep_pcu_get_constraint, 2498 .put_constraint = snbep_pcu_put_constraint, 2499 }; 2500 2501 static struct intel_uncore_type hswep_uncore_pcu = { 2502 .name = "pcu", 2503 .num_counters = 4, 2504 .num_boxes = 1, 2505 .perf_ctr_bits = 48, 2506 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, 2507 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, 2508 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 2509 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 2510 .num_shared_regs = 1, 2511 .ops = &hswep_uncore_pcu_ops, 2512 .format_group = &snbep_uncore_pcu_format_group, 2513 }; 2514 2515 static struct intel_uncore_type *hswep_msr_uncores[] = { 2516 &hswep_uncore_ubox, 2517 &hswep_uncore_cbox, 2518 &hswep_uncore_sbox, 2519 &hswep_uncore_pcu, 2520 NULL, 2521 }; 2522 2523 void hswep_uncore_cpu_init(void) 2524 { 2525 int pkg = topology_phys_to_logical_pkg(0); 2526 2527 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2528 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2529 2530 /* Detect 6-8 core systems with only two SBOXes */ 2531 if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { 2532 u32 capid4; 2533 2534 pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3], 2535 0x94, &capid4); 2536 if (((capid4 >> 6) & 0x3) == 0) 2537 hswep_uncore_sbox.num_boxes = 2; 2538 } 2539 2540 uncore_msr_uncores = hswep_msr_uncores; 2541 } 2542 2543 static struct intel_uncore_type hswep_uncore_ha = { 2544 .name = "ha", 2545 .num_counters = 5, 2546 .num_boxes = 2, 2547 .perf_ctr_bits = 48, 2548 SNBEP_UNCORE_PCI_COMMON_INIT(), 2549 }; 2550 2551 static struct uncore_event_desc hswep_uncore_imc_events[] = { 2552 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), 2553 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), 2554 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), 2555 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), 2556 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), 2557 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), 2558 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), 2559 { /* end: all zeroes */ }, 2560 }; 2561 2562 static struct intel_uncore_type hswep_uncore_imc = { 2563 .name = "imc", 2564 .num_counters = 5, 2565 .num_boxes = 8, 2566 .perf_ctr_bits = 48, 2567 .fixed_ctr_bits = 48, 2568 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 2569 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 2570 .event_descs = hswep_uncore_imc_events, 2571 SNBEP_UNCORE_PCI_COMMON_INIT(), 2572 }; 2573 2574 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; 2575 2576 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) 2577 { 2578 struct pci_dev *pdev = box->pci_dev; 2579 struct hw_perf_event *hwc = &event->hw; 2580 u64 count = 0; 2581 2582 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); 2583 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); 2584 2585 return count; 2586 } 2587 2588 static struct intel_uncore_ops hswep_uncore_irp_ops = { 2589 .init_box = snbep_uncore_pci_init_box, 2590 .disable_box = snbep_uncore_pci_disable_box, 2591 .enable_box = snbep_uncore_pci_enable_box, 2592 .disable_event = ivbep_uncore_irp_disable_event, 2593 .enable_event = ivbep_uncore_irp_enable_event, 2594 .read_counter = hswep_uncore_irp_read_counter, 2595 }; 2596 2597 static struct intel_uncore_type hswep_uncore_irp = { 2598 .name = "irp", 2599 .num_counters = 4, 2600 .num_boxes = 1, 2601 .perf_ctr_bits = 48, 2602 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2603 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2604 .ops = &hswep_uncore_irp_ops, 2605 .format_group = &snbep_uncore_format_group, 2606 }; 2607 2608 static struct intel_uncore_type hswep_uncore_qpi = { 2609 .name = "qpi", 2610 .num_counters = 5, 2611 .num_boxes = 3, 2612 .perf_ctr_bits = 48, 2613 .perf_ctr = SNBEP_PCI_PMON_CTR0, 2614 .event_ctl = SNBEP_PCI_PMON_CTL0, 2615 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 2616 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2617 .num_shared_regs = 1, 2618 .ops = &snbep_uncore_qpi_ops, 2619 .format_group = &snbep_uncore_qpi_format_group, 2620 }; 2621 2622 static struct event_constraint hswep_uncore_r2pcie_constraints[] = { 2623 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 2624 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 2625 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 2626 UNCORE_EVENT_CONSTRAINT(0x23, 0x1), 2627 UNCORE_EVENT_CONSTRAINT(0x24, 0x1), 2628 UNCORE_EVENT_CONSTRAINT(0x25, 0x1), 2629 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 2630 UNCORE_EVENT_CONSTRAINT(0x27, 0x1), 2631 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 2632 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 2633 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1), 2634 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), 2635 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 2636 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 2637 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 2638 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 2639 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 2640 UNCORE_EVENT_CONSTRAINT(0x35, 0x3), 2641 EVENT_CONSTRAINT_END 2642 }; 2643 2644 static struct intel_uncore_type hswep_uncore_r2pcie = { 2645 .name = "r2pcie", 2646 .num_counters = 4, 2647 .num_boxes = 1, 2648 .perf_ctr_bits = 48, 2649 .constraints = hswep_uncore_r2pcie_constraints, 2650 SNBEP_UNCORE_PCI_COMMON_INIT(), 2651 }; 2652 2653 static struct event_constraint hswep_uncore_r3qpi_constraints[] = { 2654 UNCORE_EVENT_CONSTRAINT(0x01, 0x3), 2655 UNCORE_EVENT_CONSTRAINT(0x07, 0x7), 2656 UNCORE_EVENT_CONSTRAINT(0x08, 0x7), 2657 UNCORE_EVENT_CONSTRAINT(0x09, 0x7), 2658 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), 2659 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), 2660 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 2661 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 2662 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 2663 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 2664 UNCORE_EVENT_CONSTRAINT(0x14, 0x3), 2665 UNCORE_EVENT_CONSTRAINT(0x15, 0x3), 2666 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), 2667 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 2668 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 2669 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 2670 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 2671 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 2672 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 2673 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 2674 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 2675 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 2676 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 2677 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 2678 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), 2679 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 2680 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 2681 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 2682 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 2683 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 2684 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 2685 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 2686 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 2687 EVENT_CONSTRAINT_END 2688 }; 2689 2690 static struct intel_uncore_type hswep_uncore_r3qpi = { 2691 .name = "r3qpi", 2692 .num_counters = 4, 2693 .num_boxes = 3, 2694 .perf_ctr_bits = 44, 2695 .constraints = hswep_uncore_r3qpi_constraints, 2696 SNBEP_UNCORE_PCI_COMMON_INIT(), 2697 }; 2698 2699 enum { 2700 HSWEP_PCI_UNCORE_HA, 2701 HSWEP_PCI_UNCORE_IMC, 2702 HSWEP_PCI_UNCORE_IRP, 2703 HSWEP_PCI_UNCORE_QPI, 2704 HSWEP_PCI_UNCORE_R2PCIE, 2705 HSWEP_PCI_UNCORE_R3QPI, 2706 }; 2707 2708 static struct intel_uncore_type *hswep_pci_uncores[] = { 2709 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha, 2710 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc, 2711 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp, 2712 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi, 2713 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie, 2714 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi, 2715 NULL, 2716 }; 2717 2718 static const struct pci_device_id hswep_uncore_pci_ids[] = { 2719 { /* Home Agent 0 */ 2720 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), 2721 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), 2722 }, 2723 { /* Home Agent 1 */ 2724 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38), 2725 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1), 2726 }, 2727 { /* MC0 Channel 0 */ 2728 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0), 2729 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0), 2730 }, 2731 { /* MC0 Channel 1 */ 2732 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1), 2733 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1), 2734 }, 2735 { /* MC0 Channel 2 */ 2736 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4), 2737 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2), 2738 }, 2739 { /* MC0 Channel 3 */ 2740 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5), 2741 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3), 2742 }, 2743 { /* MC1 Channel 0 */ 2744 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0), 2745 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4), 2746 }, 2747 { /* MC1 Channel 1 */ 2748 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1), 2749 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5), 2750 }, 2751 { /* MC1 Channel 2 */ 2752 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4), 2753 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6), 2754 }, 2755 { /* MC1 Channel 3 */ 2756 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5), 2757 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7), 2758 }, 2759 { /* IRP */ 2760 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39), 2761 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0), 2762 }, 2763 { /* QPI0 Port 0 */ 2764 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32), 2765 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0), 2766 }, 2767 { /* QPI0 Port 1 */ 2768 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33), 2769 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1), 2770 }, 2771 { /* QPI1 Port 2 */ 2772 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a), 2773 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2), 2774 }, 2775 { /* R2PCIe */ 2776 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34), 2777 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0), 2778 }, 2779 { /* R3QPI0 Link 0 */ 2780 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36), 2781 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0), 2782 }, 2783 { /* R3QPI0 Link 1 */ 2784 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37), 2785 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1), 2786 }, 2787 { /* R3QPI1 Link 2 */ 2788 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e), 2789 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2), 2790 }, 2791 { /* QPI Port 0 filter */ 2792 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86), 2793 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2794 SNBEP_PCI_QPI_PORT0_FILTER), 2795 }, 2796 { /* QPI Port 1 filter */ 2797 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96), 2798 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2799 SNBEP_PCI_QPI_PORT1_FILTER), 2800 }, 2801 { /* PCU.3 (for Capability registers) */ 2802 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), 2803 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2804 HSWEP_PCI_PCU_3), 2805 }, 2806 { /* end: all zeroes */ } 2807 }; 2808 2809 static struct pci_driver hswep_uncore_pci_driver = { 2810 .name = "hswep_uncore", 2811 .id_table = hswep_uncore_pci_ids, 2812 }; 2813 2814 int hswep_uncore_pci_init(void) 2815 { 2816 int ret = snbep_pci2phy_map_init(0x2f1e); 2817 if (ret) 2818 return ret; 2819 uncore_pci_uncores = hswep_pci_uncores; 2820 uncore_pci_driver = &hswep_uncore_pci_driver; 2821 return 0; 2822 } 2823 /* end of Haswell-EP uncore support */ 2824 2825 /* BDX uncore support */ 2826 2827 static struct intel_uncore_type bdx_uncore_ubox = { 2828 .name = "ubox", 2829 .num_counters = 2, 2830 .num_boxes = 1, 2831 .perf_ctr_bits = 48, 2832 .fixed_ctr_bits = 48, 2833 .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 2834 .event_ctl = HSWEP_U_MSR_PMON_CTL0, 2835 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 2836 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 2837 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 2838 .num_shared_regs = 1, 2839 .ops = &ivbep_uncore_msr_ops, 2840 .format_group = &ivbep_uncore_ubox_format_group, 2841 }; 2842 2843 static struct event_constraint bdx_uncore_cbox_constraints[] = { 2844 UNCORE_EVENT_CONSTRAINT(0x09, 0x3), 2845 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 2846 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 2847 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), 2848 EVENT_CONSTRAINT_END 2849 }; 2850 2851 static struct intel_uncore_type bdx_uncore_cbox = { 2852 .name = "cbox", 2853 .num_counters = 4, 2854 .num_boxes = 24, 2855 .perf_ctr_bits = 48, 2856 .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 2857 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 2858 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, 2859 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 2860 .msr_offset = HSWEP_CBO_MSR_OFFSET, 2861 .num_shared_regs = 1, 2862 .constraints = bdx_uncore_cbox_constraints, 2863 .ops = &hswep_uncore_cbox_ops, 2864 .format_group = &hswep_uncore_cbox_format_group, 2865 }; 2866 2867 static struct intel_uncore_type bdx_uncore_sbox = { 2868 .name = "sbox", 2869 .num_counters = 4, 2870 .num_boxes = 4, 2871 .perf_ctr_bits = 48, 2872 .event_ctl = HSWEP_S0_MSR_PMON_CTL0, 2873 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, 2874 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 2875 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, 2876 .msr_offset = HSWEP_SBOX_MSR_OFFSET, 2877 .ops = &hswep_uncore_sbox_msr_ops, 2878 .format_group = &hswep_uncore_sbox_format_group, 2879 }; 2880 2881 #define BDX_MSR_UNCORE_SBOX 3 2882 2883 static struct intel_uncore_type *bdx_msr_uncores[] = { 2884 &bdx_uncore_ubox, 2885 &bdx_uncore_cbox, 2886 &hswep_uncore_pcu, 2887 &bdx_uncore_sbox, 2888 NULL, 2889 }; 2890 2891 void bdx_uncore_cpu_init(void) 2892 { 2893 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2894 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2895 uncore_msr_uncores = bdx_msr_uncores; 2896 2897 /* BDX-DE doesn't have SBOX */ 2898 if (boot_cpu_data.x86_model == 86) 2899 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; 2900 } 2901 2902 static struct intel_uncore_type bdx_uncore_ha = { 2903 .name = "ha", 2904 .num_counters = 4, 2905 .num_boxes = 2, 2906 .perf_ctr_bits = 48, 2907 SNBEP_UNCORE_PCI_COMMON_INIT(), 2908 }; 2909 2910 static struct intel_uncore_type bdx_uncore_imc = { 2911 .name = "imc", 2912 .num_counters = 5, 2913 .num_boxes = 8, 2914 .perf_ctr_bits = 48, 2915 .fixed_ctr_bits = 48, 2916 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 2917 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 2918 .event_descs = hswep_uncore_imc_events, 2919 SNBEP_UNCORE_PCI_COMMON_INIT(), 2920 }; 2921 2922 static struct intel_uncore_type bdx_uncore_irp = { 2923 .name = "irp", 2924 .num_counters = 4, 2925 .num_boxes = 1, 2926 .perf_ctr_bits = 48, 2927 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2928 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2929 .ops = &hswep_uncore_irp_ops, 2930 .format_group = &snbep_uncore_format_group, 2931 }; 2932 2933 static struct intel_uncore_type bdx_uncore_qpi = { 2934 .name = "qpi", 2935 .num_counters = 4, 2936 .num_boxes = 3, 2937 .perf_ctr_bits = 48, 2938 .perf_ctr = SNBEP_PCI_PMON_CTR0, 2939 .event_ctl = SNBEP_PCI_PMON_CTL0, 2940 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 2941 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2942 .num_shared_regs = 1, 2943 .ops = &snbep_uncore_qpi_ops, 2944 .format_group = &snbep_uncore_qpi_format_group, 2945 }; 2946 2947 static struct event_constraint bdx_uncore_r2pcie_constraints[] = { 2948 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 2949 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 2950 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 2951 UNCORE_EVENT_CONSTRAINT(0x23, 0x1), 2952 UNCORE_EVENT_CONSTRAINT(0x25, 0x1), 2953 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 2954 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 2955 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 2956 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 2957 EVENT_CONSTRAINT_END 2958 }; 2959 2960 static struct intel_uncore_type bdx_uncore_r2pcie = { 2961 .name = "r2pcie", 2962 .num_counters = 4, 2963 .num_boxes = 1, 2964 .perf_ctr_bits = 48, 2965 .constraints = bdx_uncore_r2pcie_constraints, 2966 SNBEP_UNCORE_PCI_COMMON_INIT(), 2967 }; 2968 2969 static struct event_constraint bdx_uncore_r3qpi_constraints[] = { 2970 UNCORE_EVENT_CONSTRAINT(0x01, 0x7), 2971 UNCORE_EVENT_CONSTRAINT(0x07, 0x7), 2972 UNCORE_EVENT_CONSTRAINT(0x08, 0x7), 2973 UNCORE_EVENT_CONSTRAINT(0x09, 0x7), 2974 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), 2975 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), 2976 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 2977 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 2978 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 2979 UNCORE_EVENT_CONSTRAINT(0x14, 0x3), 2980 UNCORE_EVENT_CONSTRAINT(0x15, 0x3), 2981 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), 2982 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 2983 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 2984 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 2985 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 2986 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 2987 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 2988 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 2989 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 2990 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 2991 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 2992 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 2993 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), 2994 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 2995 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 2996 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 2997 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 2998 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 2999 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 3000 EVENT_CONSTRAINT_END 3001 }; 3002 3003 static struct intel_uncore_type bdx_uncore_r3qpi = { 3004 .name = "r3qpi", 3005 .num_counters = 3, 3006 .num_boxes = 3, 3007 .perf_ctr_bits = 48, 3008 .constraints = bdx_uncore_r3qpi_constraints, 3009 SNBEP_UNCORE_PCI_COMMON_INIT(), 3010 }; 3011 3012 enum { 3013 BDX_PCI_UNCORE_HA, 3014 BDX_PCI_UNCORE_IMC, 3015 BDX_PCI_UNCORE_IRP, 3016 BDX_PCI_UNCORE_QPI, 3017 BDX_PCI_UNCORE_R2PCIE, 3018 BDX_PCI_UNCORE_R3QPI, 3019 }; 3020 3021 static struct intel_uncore_type *bdx_pci_uncores[] = { 3022 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha, 3023 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc, 3024 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp, 3025 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi, 3026 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie, 3027 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi, 3028 NULL, 3029 }; 3030 3031 static const struct pci_device_id bdx_uncore_pci_ids[] = { 3032 { /* Home Agent 0 */ 3033 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), 3034 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), 3035 }, 3036 { /* Home Agent 1 */ 3037 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38), 3038 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1), 3039 }, 3040 { /* MC0 Channel 0 */ 3041 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0), 3042 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0), 3043 }, 3044 { /* MC0 Channel 1 */ 3045 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1), 3046 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1), 3047 }, 3048 { /* MC0 Channel 2 */ 3049 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4), 3050 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2), 3051 }, 3052 { /* MC0 Channel 3 */ 3053 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5), 3054 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3), 3055 }, 3056 { /* MC1 Channel 0 */ 3057 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0), 3058 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4), 3059 }, 3060 { /* MC1 Channel 1 */ 3061 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1), 3062 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5), 3063 }, 3064 { /* MC1 Channel 2 */ 3065 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4), 3066 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6), 3067 }, 3068 { /* MC1 Channel 3 */ 3069 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5), 3070 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7), 3071 }, 3072 { /* IRP */ 3073 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39), 3074 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0), 3075 }, 3076 { /* QPI0 Port 0 */ 3077 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32), 3078 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0), 3079 }, 3080 { /* QPI0 Port 1 */ 3081 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33), 3082 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1), 3083 }, 3084 { /* QPI1 Port 2 */ 3085 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a), 3086 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2), 3087 }, 3088 { /* R2PCIe */ 3089 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34), 3090 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0), 3091 }, 3092 { /* R3QPI0 Link 0 */ 3093 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36), 3094 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0), 3095 }, 3096 { /* R3QPI0 Link 1 */ 3097 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37), 3098 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1), 3099 }, 3100 { /* R3QPI1 Link 2 */ 3101 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e), 3102 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2), 3103 }, 3104 { /* QPI Port 0 filter */ 3105 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86), 3106 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0), 3107 }, 3108 { /* QPI Port 1 filter */ 3109 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96), 3110 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1), 3111 }, 3112 { /* QPI Port 2 filter */ 3113 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), 3114 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), 3115 }, 3116 { /* end: all zeroes */ } 3117 }; 3118 3119 static struct pci_driver bdx_uncore_pci_driver = { 3120 .name = "bdx_uncore", 3121 .id_table = bdx_uncore_pci_ids, 3122 }; 3123 3124 int bdx_uncore_pci_init(void) 3125 { 3126 int ret = snbep_pci2phy_map_init(0x6f1e); 3127 3128 if (ret) 3129 return ret; 3130 uncore_pci_uncores = bdx_pci_uncores; 3131 uncore_pci_driver = &bdx_uncore_pci_driver; 3132 return 0; 3133 } 3134 3135 /* end of BDX uncore support */ 3136