#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	unsigned perf_ctr;	/* offset of the first counter register */
	unsigned event_ctl;	/* offset of the first event select register */
	unsigned event_mask;	/* valid bits in the event select register */
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;	/* MSR stride between boxes of this type */
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;	/* per-box offsets, if the stride is not uniform */
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						    struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[0];
};

#define UNCORE_BOX_FLAG_INITIATED	0

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
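/*
 * Illustrative use of the two macros above; the table name and the
 * event encoding below are hypothetical and only sketch the expected
 * pattern (the real tables live in the per-model files):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 *	static struct uncore_event_desc foo_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },	// end: all zeroes
 *	};
 *
 * The format attribute is exported via format_group and tells perf how
 * to place "event=..." into perf_event_attr::config; the event
 * descriptions are exported via events_group.
 */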
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}
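/*
 * A worked example of the MSR address arithmetic above (the register
 * numbers are made up for illustration): with event_ctl = 0xc10,
 * perf_ctr = 0xc11 and pair_ctr_ctl set, counter idx 2 uses control
 * MSR 0xc10 + 2 * 2 = 0xc14 and counter MSR 0xc11 + 2 * 2 = 0xc15,
 * i.e. control/counter pairs are interleaved in MSR space.  PCI boxes
 * use fixed strides instead: 4 bytes per control register and 8 bytes
 * per 64-bit counter.
 */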
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
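/*
 * Sketch of how uncore_perf_event_update() (implemented in uncore.c)
 * consumes the *_ctr_bits() helpers above -- shown for orientation
 * only, the authoritative version lives in uncore.c:
 *
 *	shift = 64 - uncore_perf_ctr_bits(box);
 *	delta = (new_count << shift) - (prev_count << shift);
 *	delta >>= shift;
 *	local64_add(delta, &event->count);
 *
 * Shifting both raw values up by 64 - width before subtracting makes
 * the delta wrap correctly when the narrower hardware counter rolls
 * over.
 */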
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* perf_event_intel_uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);
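/*
 * The per-model init routines above are selected at init time in
 * uncore.c based on the CPU model.  A cpu_init() hook typically just
 * points the global type arrays at its model's tables, roughly (the
 * table name below is illustrative):
 *
 *	void snb_uncore_cpu_init(void)
 *	{
 *		uncore_msr_uncores = snb_msr_uncores;
 *		...
 *	}
 */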