// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

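/*
 * All discovered PMON types, keyed by box type. Each type carries its own
 * rb-tree of units, ordered by (pmu_idx, die); see unit_less() below.
 */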
static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

static int logical_die_id;

static int get_device_die_id(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die ids
	 * are assigned sequentially, in the order in which the discovery
	 * table devices are detected.
	 */
	if (node < 0)
		return logical_die_id++;

	return uncore_device_to_die(dev);
}

#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	const struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->units = RB_ROOT;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;
}

static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

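/*
 * The units rb-tree is ordered by (pmu_idx, die). This comparator matches
 * on pmu_idx alone, so rb_find_first() lands on the first unit with a
 * given PMU index and rb_next() walks its per-die siblings.
 */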
static inline int pmu_idx_cmp(const void *key, const struct rb_node *b)
{
	const struct intel_uncore_discovery_unit *unit;
	const unsigned int *id = key;

	unit = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (unit->pmu_idx > *id)
		return -1;
	else if (unit->pmu_idx < *id)
		return 1;

	return 0;
}

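/*
 * Look up the unit of a PMU on a given die. A negative @die means
 * "any die": the first unit with a matching pmu_idx is returned.
 */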
static struct intel_uncore_discovery_unit *
intel_uncore_find_discovery_unit(struct rb_root *units, int die,
				 unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit;
	struct rb_node *pos;

	if (!units)
		return NULL;

	pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp);
	if (!pos)
		return NULL;
	unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

	if (die < 0)
		return unit;

	for (; pos; pos = rb_next(pos)) {
		unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);

		if (unit->pmu_idx != pmu_idx)
			break;

		if (unit->die == die)
			return unit;
	}

	return NULL;
}

int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
					unsigned int pmu_idx)
{
	struct intel_uncore_discovery_unit *unit;

	unit = intel_uncore_find_discovery_unit(units, die, pmu_idx);
	if (unit)
		return unit->id;

	return -1;
}

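/*
 * Order units by pmu_idx first, then die, so that all units of the same
 * PMU across dies are adjacent in the tree.
 */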
static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
{
	const struct intel_uncore_discovery_unit *a_node, *b_node;

	a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
	b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);

	if (a_node->pmu_idx < b_node->pmu_idx)
		return true;
	if (a_node->pmu_idx > b_node->pmu_idx)
		return false;

	if (a_node->die < b_node->die)
		return true;
	if (a_node->die > b_node->die)
		return false;

	return false;
}

static inline struct intel_uncore_discovery_unit *
uncore_find_unit(struct rb_root *root, unsigned int id)
{
	struct intel_uncore_discovery_unit *unit;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
		if (unit->id == id)
			return unit;
	}

	return NULL;
}

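/**
 * uncore_find_add_unit - insert a unit and assign its PMU index
 * @node: unit to insert
 * @root: units rb-tree of the owning type
 * @num_units: running unit count used to allocate new PMU indexes, may be NULL
 *
 * Units that share a box id across dies share a pmu_idx; a unit with a
 * previously unseen id is assigned the next free index.
 */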
void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
			  struct rb_root *root, u16 *num_units)
{
	struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);

	if (unit)
		node->pmu_idx = unit->pmu_idx;
	else if (num_units)
		node->pmu_idx = (*num_units)++;

	rb_add(&node->node, root, unit_less);
}

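/*
 * Create a unit node from a discovery-table entry. The first unit of a
 * type also donates the type-wide geometry (number, width and offsets of
 * the counters).
 */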
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die)
{
	struct intel_uncore_discovery_unit *node;
	struct intel_uncore_discovery_type *type;

	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
		pr_info("Invalid address detected for uncore type %d box %d; disabling the uncore unit.\n",
			unit->box_type, unit->box_id);
		return;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return;

	node->die = die;
	node->id = unit->box_id;
	node->addr = unit->ctl;

	type = get_uncore_discovery_type(unit);
	if (!type) {
		kfree(node);
		return;
	}

	uncore_find_add_unit(node, &type->units, &type->num_units);

	/* Store generic information for the first box */
	if (type->num_units == 1) {
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
	}
}

static bool
uncore_ignore_unit(struct uncore_unit_discovery *unit,
		   struct uncore_discovery_domain *domain)
{
	int i;

	if (!domain || !domain->units_ignore)
		return false;

	for (i = 0; domain->units_ignore[i] != UNCORE_IGNORE_END; i++) {
		if (unit->box_type == domain->units_ignore[i])
			return true;
	}

	return false;
}

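/*
 * The table is mapped in two steps: the global header is mapped first to
 * learn max_units and the per-unit stride, then the mapping is redone at
 * full size, (1 + max_units) entries of stride * 8 bytes each.
 */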
static int __parse_discovery_table(struct uncore_discovery_domain *domain,
				   resource_size_t addr, int die, bool *parsed)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	unsigned long size;
	int i;

	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	if (domain->global_init && domain->global_init(global.ctl)) {
		iounmap(io_addr);
		return -ENODEV;
	}

	/* Parse the Unit Discovery state */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		if (uncore_ignore_unit(&unit, domain))
			continue;

		uncore_insert_box_info(&unit, die);
	}

	*parsed = true;
	iounmap(io_addr);
	return 0;
}

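/*
 * Decode the (32- or 64-bit memory) BAR that holds the discovery table
 * and hand the physical address to __parse_discovery_table().
 */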
static int parse_discovery_table(struct uncore_discovery_domain *domain,
				 struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed)
{
	resource_size_t addr;
	u32 val;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
		return -EINVAL;

	addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		u32 val2;

		pci_read_config_dword(dev, bar_offset + 4, &val2);
		addr |= ((resource_size_t)val2) << 32;
	}
#endif

	return __parse_discovery_table(domain, addr, die, parsed);
}

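/*
 * Walk every matching PCI device and, within each, every DVSEC capability,
 * looking for entries that identify a PMON discovery table. The BIR field
 * of the second DVSEC dword selects the BAR that holds the table.
 */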
static bool uncore_discovery_pci(struct uncore_discovery_domain *domain)
{
	u32 device, val, entry_id, bar_offset;
	struct pci_dev *dev = NULL;
	int die, dvsec = 0;
	bool parsed = false;
	bool ret = true;

	device = domain->discovery_base;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}
			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(domain, dev, die, bar_offset, &parsed);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;
err:
	pci_dev_put(dev);

	return ret;
}

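/*
 * MSR-based discovery: read the table base from the discovery MSR on one
 * CPU per die. A zero base or a faulting read means no table on that die.
 */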
static bool uncore_discovery_msr(struct uncore_discovery_domain *domain)
{
	unsigned long *die_mask;
	bool parsed = false;
	int cpu, die;
	u64 base;

	die_mask = kcalloc(BITS_TO_LONGS(uncore_max_dies()),
			   sizeof(unsigned long), GFP_KERNEL);
	if (!die_mask)
		return false;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		die = topology_logical_die_id(cpu);
		if (__test_and_set_bit(die, die_mask))
			continue;

		if (rdmsrq_safe_on_cpu(cpu, domain->discovery_base, &base))
			continue;

		if (!base)
			continue;

		__parse_discovery_table(domain, base, die, &parsed);
	}

	cpus_read_unlock();

	kfree(die_mask);
	return parsed;
}

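/**
 * uncore_discovery - parse the discovery tables of all domains
 * @init: platform init data describing up to UNCORE_DISCOVERY_DOMAINS domains
 *
 * Each domain advertises its table through either an MSR or a PCI DVSEC
 * capability. Returns true if at least one table was parsed.
 */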
bool uncore_discovery(struct uncore_plat_init *init)
{
	struct uncore_discovery_domain *domain;
	bool ret = false;
	int i;

	for (i = 0; i < UNCORE_DISCOVERY_DOMAINS; i++) {
		domain = &init->domain[i];
		if (domain->discovery_base) {
			if (!domain->base_is_pci)
				ret |= uncore_discovery_msr(domain);
			else
				ret |= uncore_discovery_pci(domain);
		}
	}

	return ret;
}

void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;
	struct intel_uncore_discovery_unit *pos;
	struct rb_node *node;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		while (!RB_EMPTY_ROOT(&type->units)) {
			node = rb_first(&type->units);
			pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
			rb_erase(node, &type->units);
			kfree(pos);
		}
		kfree(type);
	}
}

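/* Generic event format, matching the usual uncore PMON event-select layout. */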
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");

static struct attribute *generic_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh.attr,
	NULL,
};

static const struct attribute_group generic_uncore_format_group = {
	.name = "format",
	.attrs = generic_uncore_formats_attr,
};

static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
{
	struct intel_uncore_discovery_unit *unit;

	unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes,
						-1, box->pmu->pmu_idx);
	if (WARN_ON_ONCE(!unit))
		return 0;

	return unit->addr;
}

void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrq(intel_generic_uncore_box_ctl(box), 0);
}

static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, hwc->config);
}

static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
						   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, 0);
}

static struct intel_uncore_ops generic_uncore_msr_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= intel_generic_uncore_msr_disable_event,
	.enable_event		= intel_generic_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

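/*
 * Compute the event's control/counter addresses for the three access
 * methods: MMIO boxes use offsets relative to box->io_addr, PCI boxes
 * offsets relative to the device's box control register, and MSR boxes
 * absolute MSR indexes.
 */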
bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
					  struct intel_uncore_box *box)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 box_ctl;

	if (!box->pmu->type->boxes)
		return false;

	if (box->io_addr) {
		hwc->config_base = uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	box_ctl = intel_generic_uncore_box_ctl(box);
	if (!box_ctl)
		return false;

	if (box->pci_dev) {
		box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl);
		hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx);
		hwc->event_base  = box_ctl + uncore_pci_perf_ctr(box, hwc->idx);
		return true;
	}

	hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx;
	hwc->event_base  = box_ctl + box->pmu->type->perf_ctr + hwc->idx;

	return true;
}

static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box)
{
	return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box));
}

void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
}

void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
}

void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = intel_pci_uncore_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
						  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, 0);
}

u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops generic_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= intel_generic_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};

#define UNCORE_GENERIC_MMIO_SIZE		0x4000

void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;

	unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx);
	if (!unit) {
		pr_warn("Uncore type %d id %d: Cannot find box control address.\n",
			type->type_id, box->pmu->pmu_idx);
		return;
	}

	if (!unit->addr) {
		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
			type->type_id, unit->id);
		return;
	}

	addr = unit->addr;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
			type->type_id, unit->id, (unsigned long long)addr);
		return;
	}

	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
}

void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
}

void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr);
}

void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
					     struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(0, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops generic_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

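/*
 * Fill a generic uncore type from its discovery data and pick the ops
 * that match the unit's access method.
 */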
static bool uncore_update_uncore_type(enum uncore_access_type type_id,
				      struct intel_uncore_type *uncore,
				      struct intel_uncore_discovery_type *type)
{
	uncore->type_id = type->type;
	uncore->num_counters = type->num_counters;
	uncore->perf_ctr_bits = type->counter_width;
	uncore->perf_ctr = (unsigned int)type->ctr_offset;
	uncore->event_ctl = (unsigned int)type->ctl_offset;
	uncore->boxes = &type->units;
	uncore->num_boxes = type->num_units;

	switch (type_id) {
	case UNCORE_ACCESS_MSR:
		uncore->ops = &generic_uncore_msr_ops;
		break;
	case UNCORE_ACCESS_PCI:
		uncore->ops = &generic_uncore_pci_ops;
		break;
	case UNCORE_ACCESS_MMIO:
		uncore->ops = &generic_uncore_mmio_ops;
		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
		break;
	default:
		return false;
	}

	return true;
}

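/**
 * intel_uncore_generic_init_uncores - build the generic uncore type array
 * @type_id: access method to collect the discovered types for
 * @num_extra: extra slots reserved for platform-specific types
 *
 * Returns a NULL-terminated array sized for all discovered types of
 * @type_id plus @num_extra platform-provided entries.
 */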
struct intel_uncore_type **
intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
{
	struct intel_uncore_discovery_type *type;
	struct intel_uncore_type **uncores;
	struct intel_uncore_type *uncore;
	struct rb_node *node;
	int i = 0;

	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
	if (!uncores)
		return empty_uncore;

	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
		type = rb_entry(node, struct intel_uncore_discovery_type, node);
		if (type->access_type != type_id)
			continue;

		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
		if (!uncore)
			break;

		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
		uncore->format_group = &generic_uncore_format_group;

		if (!uncore_update_uncore_type(type_id, uncore, type)) {
			kfree(uncore);
			continue;
		}
		uncores[i++] = uncore;
	}

	return uncores;
}

void intel_uncore_generic_uncore_cpu_init(void)
{
	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
}

int intel_uncore_generic_uncore_pci_init(void)
{
	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);

	return 0;
}

void intel_uncore_generic_uncore_mmio_init(void)
{
	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
}