#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))
#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						    struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[];	/* flexible array member */
};
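
/*
 * Illustrative allocation sketch (not part of this header): because
 * shared_regs is a flexible array member, a box and the shared-register
 * state for its type are expected to be allocated as one block, roughly
 * (hypothetical local names):
 *
 *	size_t size = sizeof(struct intel_uncore_box) +
 *		      type->num_shared_regs *
 *		      sizeof(struct intel_uncore_extra_reg);
 *	struct intel_uncore_box *box = kzalloc_node(size, GFP_KERNEL, node);
 */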

#define UNCORE_BOX_FLAG_INITIATED	0

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)				\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
	       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
	       uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
	       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
	       uncore_msr_box_offset(box);
}
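
/*
 * Worked example (hypothetical register layout, for illustration only):
 * for a type with event_ctl = 0x700, perf_ctr = 0x706, msr_offset = 0x10
 * and pair_ctr_ctl = 0, counter 3 of the box behind pmu_idx 2 resolves to
 *
 *	uncore_msr_event_ctl() = 0x700 + 3 + 2 * 0x10 = 0x723
 *	uncore_msr_perf_ctr()  = 0x706 + 3 + 2 * 0x10 = 0x729
 *
 * With pair_ctr_ctl = 1 the control and counter registers are interleaved
 * in pairs, so the per-counter stride becomes 2 instead of 1.
 */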

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
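
/*
 * Usage sketch (illustrative, not taken from this header): event scheduling
 * is expected to bracket counter assignment with the constraint helpers,
 * roughly:
 *
 *	struct event_constraint *c = uncore_get_constraint(box, event);
 *	// ...place the event on a counter permitted by the constraint...
 *	// on failure, or when the event is later removed:
 *	uncore_put_constraint(box, event);
 *
 * uncore_get_constraint() may take a reference on a shared extra register
 * (see struct intel_uncore_extra_reg); uncore_put_constraint() drops it.
 */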

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* perf_event_intel_uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);
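
/*
 * Illustrative sketch (not part of this header): the common uncore driver
 * selects among the per-model init routines declared above at boot time,
 * conceptually along these lines (model numbers shown are examples):
 *
 *	switch (boot_cpu_data.x86_model) {
 *	case 42:	// Sandy Bridge client
 *		ret = snb_uncore_pci_init();
 *		snb_uncore_cpu_init();
 *		break;
 *	case 45:	// Sandy Bridge-EP
 *		ret = snbep_uncore_pci_init();
 *		snbep_uncore_cpu_init();
 *		break;
 *	// ...
 *	}
 */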