/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

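/*
 * Check whether the dedicated discovery-table PCI device is present: an
 * Intel device with the expected device ID that also exposes the uncore
 * discovery extended capability (DVSEC).
 */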
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has the unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

static int get_device_die_id(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die ids
	 * are assigned consecutively in the order in which the discovery
	 * table devices are detected.
	 */
	if (node < 0)
		return logical_die_id++;

	return uncore_device_to_die(dev);
}

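/*
 * Discovered uncore types are kept in the discovery_tables rb tree, keyed by
 * the unit's box type.  __type_cmp() is the rb_find() comparator and
 * __type_less() the rb_add() ordering helper for that tree.
 */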
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->units = RB_ROOT;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

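/*
 * Each discovery type keeps its units in an rb tree ordered by
 * (pmu_idx, die).  pmu_idx_cmp() compares only the pmu_idx key, so
 * rb_find_first() returns the first unit with a given pmu_idx across dies.
 */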
static inline int pmu_idx_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_unit *unit;
	const unsigned int *id = key;

	unit = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (unit->pmu_idx > *id)
		return -1;
	else if (unit->pmu_idx < *id)
		return 1;

	return 0;
}

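/*
 * Look up the discovery unit for a given PMU index.  A negative @die returns
 * the first matching unit on any die; otherwise the walk continues through
 * the units with the same pmu_idx until the requested die is found.
 */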
static struct intel_uncore_discovery_unit *
intel_uncore_find_discovery_unit(struct rb_root *units, int die,
				 unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit;
	struct rb_node *pos;

	if (!units)
		return NULL;

	pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp);
	if (!pos)
		return NULL;
	unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

	if (die < 0)
		return unit;

	for (; pos; pos = rb_next(pos)) {
		unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

		if (unit->pmu_idx != pmu_idx)
			break;

		if (unit->die == die)
			return unit;
	}

	return NULL;
}

int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
					unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit;

	unit = intel_uncore_find_discovery_unit(units, die, pmu_idx);
	if (unit)
		return unit->id;

	return -1;
}

static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
{
	struct intel_uncore_discovery_unit *a_node, *b_node;

	a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
	b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (a_node->pmu_idx < b_node->pmu_idx)
		return true;
	if (a_node->pmu_idx > b_node->pmu_idx)
		return false;

	if (a_node->die < b_node->die)
		return true;
	if (a_node->die > b_node->die)
		return false;

	return false;
}

static inline struct intel_uncore_discovery_unit *
uncore_find_unit(struct rb_root *root, unsigned int id)
{
	struct intel_uncore_discovery_unit *unit;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
		if (unit->id == id)
			return unit;
	}

	return NULL;
}

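/*
 * Insert a newly discovered unit into a type's unit tree.  A unit whose id
 * already exists on another die reuses that unit's pmu_idx; otherwise the
 * next free index is assigned and *num_units is bumped.
 */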
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
			  struct rb_root *root, u16 *num_units)
{
	struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);

	if (unit)
		node->pmu_idx = unit->pmu_idx;
	else if (num_units)
		node->pmu_idx = (*num_units)++;

	rb_add(&node->node, root, unit_less);
}

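/*
 * Record one discovered box: validate its control/counter addresses,
 * allocate a unit node for (die, box id, box control address), attach it to
 * the matching discovery type, and cache the generic counter layout from the
 * first unit of that type.
 */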
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die)
{
	struct intel_uncore_discovery_unit *node;
	struct intel_uncore_discovery_type *type;

	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
		pr_info("Invalid address detected for uncore type %d box %d, "
			"disabling the uncore unit.\n",
			unit->box_type, unit->box_id);
		return;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return;

	node->die = die;
	node->id = unit->box_id;
	node->addr = unit->ctl;

	type = get_uncore_discovery_type(unit);
	if (!type) {
		kfree(node);
		return;
	}

	uncore_find_add_unit(node, &type->units, &type->num_units);

	/* Store generic information for the first box */
	if (type->num_units == 1) {
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
	}
}

static bool
uncore_ignore_unit(struct uncore_unit_discovery *unit, int *ignore)
{
	int i;

	if (!ignore)
		return false;

	for (i = 0; ignore[i] != UNCORE_IGNORE_END; i++) {
		if (unit->box_type == ignore[i])
			return true;
	}

	return false;
}

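/*
 * Parse one uncore discovery table located at physical address @addr.  As
 * the code below implies, the table starts with the global discovery state
 * and is followed by global.max_units unit entries, spaced
 * global.stride * 8 bytes apart:
 *
 *   addr                              : struct uncore_global_discovery
 *   addr + 1 * (global.stride * 8)    : first unit entry
 *   addr + 2 * (global.stride * 8)    : second unit entry
 *   ...
 *
 * Invalid and explicitly ignored units are skipped; everything else is
 * recorded via uncore_insert_box_info().
 */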
static int __parse_discovery_table(resource_size_t addr, int die,
				   bool *parsed, int *ignore)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	unsigned long size;
	int i;

	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parsing Unit Discovery State */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		if (uncore_ignore_unit(&unit, ignore))
			continue;

		uncore_insert_box_info(&unit, die);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

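/*
 * Read the discovery table's base address from the BAR at @bar_offset in the
 * device's config space (handling a 64-bit memory BAR when
 * CONFIG_PHYS_ADDR_T_64BIT is set) and hand the physical address to
 * __parse_discovery_table().
 */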
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed,
				 int *ignore)
{
	resource_size_t addr;
	u32 val;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
		return -EINVAL;

	addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		u32 val2;

		pci_read_config_dword(dev, bar_offset + 4, &val2);
		addr |= ((resource_size_t)val2) << 32;
	}
#endif

	return __parse_discovery_table(addr, die, parsed, ignore);
}

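/*
 * PCI-based enumeration: walk Intel PCI devices that carry the uncore
 * discovery DVSEC, derive the BAR holding the table from the DVSEC BIR
 * field, map the device to a die, and parse its table.  Returns true if at
 * least one table was parsed.
 */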
static bool intel_uncore_has_discovery_tables_pci(int *ignore)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0, ret = true;
	struct pci_dev *dev = NULL;
	bool parsed = false;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed, ignore);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

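/*
 * MSR-based enumeration: on one CPU per die, read UNCORE_DISCOVERY_MSR to
 * get the physical base of that die's discovery table and parse it.
 * Returns true if any table was parsed.
 */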
static bool intel_uncore_has_discovery_tables_msr(int *ignore)
{
	unsigned long *die_mask;
	bool parsed = false;
	int cpu, die;
	u64 base;

	die_mask = kcalloc(BITS_TO_LONGS(uncore_max_dies()),
			   sizeof(unsigned long), GFP_KERNEL);
	if (!die_mask)
		return false;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		die = topology_logical_die_id(cpu);
		if (__test_and_set_bit(die, die_mask))
			continue;

		if (rdmsrq_safe_on_cpu(cpu, UNCORE_DISCOVERY_MSR, &base))
			continue;

		if (!base)
			continue;

		__parse_discovery_table(base, die, &parsed, ignore);
	}

	cpus_read_unlock();

	kfree(die_mask);
	return parsed;
}

bool intel_uncore_has_discovery_tables(int *ignore)
{
	return intel_uncore_has_discovery_tables_msr(ignore) ||
	       intel_uncore_has_discovery_tables_pci(ignore);
}

void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;
	struct intel_uncore_discovery_unit *pos;
	struct rb_node *node;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		while (!RB_EMPTY_ROOT(&type->units)) {
			node = rb_first(&type->units);
			pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
			rb_erase(node, &type->units);
			kfree(pos);
		}
		kfree(type);
	}
}

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

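/*
 * Return the box control address recorded during discovery for this PMU's
 * index (on any die), or 0 if no unit was found.  The MSR helpers below use
 * this address directly as the box control MSR.
 */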
static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_discovery_unit *unit;

	unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes,
						-1, box->pmu->pmu_idx);
	if (WARN_ON_ONCE(!unit))
		return 0;

	return unit->addr;
}

void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= intel_generic_uncore_msr_disable_event,
	.enable_event		= intel_generic_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

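/*
 * Compute the event's config/counter base for the box's access method: an
 * already ioremapped MMIO box uses plain register offsets, a PCI box offsets
 * them from the box control register in config space, and an MSR box builds
 * absolute MSR numbers from the box control MSR plus the per-counter offset.
 */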
bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
					  struct intel_uncore_box *box)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 box_ctl;

	if (!box->pmu->type->boxes)
		return false;

	if (box->io_addr) {
		hwc->config_base = uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	box_ctl = intel_generic_uncore_box_ctl(box);
	if (!box_ctl)
		return false;

	if (box->pci_dev) {
		box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl);
		hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = box_ctl + uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx;
	hwc->event_base  = box_ctl + box->pmu->type->perf_ctr + hwc->idx;

	return true;
}

static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box)
{
	return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box));
}

void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= intel_generic_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};

#define UNCORE_GENERIC_MMIO_SIZE		0x4000

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx);
	if (!unit) {
		pr_warn("Uncore type %d id %d: Cannot find box control address.\n",
			type->type_id, box->pmu->pmu_idx);
		return;
	}

	if (!unit->addr) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, unit->id);
		return;
	}

	addr = unit->addr;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, unit->id, (unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

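/*
 * Fill in a generic intel_uncore_type from the discovery information and
 * pick the ops matching its access type (MSR, PCI or MMIO).  Returns false
 * for an unknown access type.
 */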
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->perf_ctr = (unsigned int)type->ctr_offset;
	uncore->event_ctl = (unsigned int)type->ctl_offset;
	uncore->boxes = &type->units;
	uncore->num_boxes = type->num_units;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

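/*
 * Build the NULL-terminated array of generic uncore types for one access
 * type.  @num_extra reserves additional, initially empty slots at the end of
 * the array.
 */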
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}