1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Support Intel uncore PerfMon discovery mechanism.
4  * Copyright(c) 2021 Intel Corporation.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <asm/msr.h>
9 #include "uncore.h"
10 #include "uncore_discovery.h"
11 
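/*
 * All discovered PMON unit types, kept in an rb-tree keyed by box type
 * (see __type_cmp()/__type_less() below).  num_discovered_types[] counts
 * the types found for each access method and is later used to size the
 * generated uncore type arrays.
 */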
12 static struct rb_root discovery_tables = RB_ROOT;
13 static int num_discovered_types[UNCORE_ACCESS_MAX];
14 
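/*
 * Check whether the platform exposes a dedicated discovery table device:
 * a PCI device with the well-known device ID that also carries the
 * discovery extended capability.
 */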
15 static bool has_generic_discovery_table(void)
16 {
17 	struct pci_dev *dev;
18 	int dvsec;
19 
20 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
21 	if (!dev)
22 		return false;
23 
24 	/* A discovery table device has the unique discovery capability ID. */
25 	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
26 	pci_dev_put(dev);
27 	if (dvsec)
28 		return true;
29 
30 	return false;
31 }
32 
33 static int logical_die_id;
34 
35 static int get_device_die_id(struct pci_dev *dev)
36 {
37 	int node = pcibus_to_node(dev->bus);
38 
39 	/*
40 	 * If the NUMA info is not available, assume that logical die IDs are
41 	 * assigned consecutively in the order in which the discovery table
42 	 * devices are detected.
43 	 */
44 	if (node < 0)
45 		return logical_die_id++;
46 
47 	return uncore_device_to_die(dev);
48 }
49 
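/* rb-tree helpers for the type tree: ordering and lookup by box type. */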
50 #define __node_2_type(cur)	\
51 	rb_entry((cur), struct intel_uncore_discovery_type, node)
52 
53 static inline int __type_cmp(const void *key, const struct rb_node *b)
54 {
55 	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
56 	const u16 *type_id = key;
57 
58 	if (type_b->type > *type_id)
59 		return -1;
60 	else if (type_b->type < *type_id)
61 		return 1;
62 
63 	return 0;
64 }
65 
66 static inline struct intel_uncore_discovery_type *
67 search_uncore_discovery_type(u16 type_id)
68 {
69 	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);
70 
71 	return (node) ? __node_2_type(node) : NULL;
72 }
73 
74 static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
75 {
76 	return (__node_2_type(a)->type < __node_2_type(b)->type);
77 }
78 
79 static struct intel_uncore_discovery_type *
80 add_uncore_discovery_type(struct uncore_unit_discovery *unit)
81 {
82 	struct intel_uncore_discovery_type *type;
83 
84 	if (unit->access_type >= UNCORE_ACCESS_MAX) {
85 		pr_warn("Unsupported access type %d\n", unit->access_type);
86 		return NULL;
87 	}
88 
89 	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
90 	if (!type)
91 		return NULL;
92 
93 	type->units = RB_ROOT;
94 
95 	type->access_type = unit->access_type;
96 	num_discovered_types[type->access_type]++;
97 	type->type = unit->box_type;
98 
99 	rb_add(&type->node, &discovery_tables, __type_less);
100 
101 	return type;
102 }
103 
104 static struct intel_uncore_discovery_type *
105 get_uncore_discovery_type(struct uncore_unit_discovery *unit)
106 {
107 	struct intel_uncore_discovery_type *type;
108 
109 	type = search_uncore_discovery_type(unit->box_type);
110 	if (type)
111 		return type;
112 
113 	return add_uncore_discovery_type(unit);
114 }
115 
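/*
 * The units tree is sorted by (pmu_idx, die), see unit_less().
 * rb_find_first() therefore returns the leftmost unit with a matching
 * pmu_idx; a negative die means "any die".
 */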
116 static inline int pmu_idx_cmp(const void *key, const struct rb_node *b)
117 {
118 	struct intel_uncore_discovery_unit *unit;
119 	const unsigned int *id = key;
120 
121 	unit = rb_entry(b, struct intel_uncore_discovery_unit, node);
122 
123 	if (unit->pmu_idx > *id)
124 		return -1;
125 	else if (unit->pmu_idx < *id)
126 		return 1;
127 
128 	return 0;
129 }
130 
131 static struct intel_uncore_discovery_unit *
132 intel_uncore_find_discovery_unit(struct rb_root *units, int die,
133 				 unsigned int pmu_idx)
134 {
135 	struct intel_uncore_discovery_unit *unit;
136 	struct rb_node *pos;
137 
138 	if (!units)
139 		return NULL;
140 
141 	pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp);
142 	if (!pos)
143 		return NULL;
144 	unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
145 
146 	if (die < 0)
147 		return unit;
148 
149 	for (; pos; pos = rb_next(pos)) {
150 		unit = rb_entry(pos, struct intel_uncore_discovery_unit, node);
151 
152 		if (unit->pmu_idx != pmu_idx)
153 			break;
154 
155 		if (unit->die == die)
156 			return unit;
157 	}
158 
159 	return NULL;
160 }
161 
162 int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die,
163 					unsigned int pmu_idx)
164 {
165 	struct intel_uncore_discovery_unit *unit;
166 
167 	unit = intel_uncore_find_discovery_unit(units, die, pmu_idx);
168 	if (unit)
169 		return unit->id;
170 
171 	return -1;
172 }
173 
174 static inline bool unit_less(struct rb_node *a, const struct rb_node *b)
175 {
176 	struct intel_uncore_discovery_unit *a_node, *b_node;
177 
178 	a_node = rb_entry(a, struct intel_uncore_discovery_unit, node);
179 	b_node = rb_entry(b, struct intel_uncore_discovery_unit, node);
180 
181 	if (a_node->pmu_idx < b_node->pmu_idx)
182 		return true;
183 	if (a_node->pmu_idx > b_node->pmu_idx)
184 		return false;
185 
186 	if (a_node->die < b_node->die)
187 		return true;
188 	if (a_node->die > b_node->die)
189 		return false;
190 
191 	return false;
192 }
193 
194 static inline struct intel_uncore_discovery_unit *
195 uncore_find_unit(struct rb_root *root, unsigned int id)
196 {
197 	struct intel_uncore_discovery_unit *unit;
198 	struct rb_node *node;
199 
200 	for (node = rb_first(root); node; node = rb_next(node)) {
201 		unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
202 		if (unit->id == id)
203 			return unit;
204 	}
205 
206 	return NULL;
207 }
208 
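/*
 * Units with the same unit ID on different dies belong to the same PMU
 * and must share a pmu_idx.  A unit with a new ID gets the next free
 * index.
 */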
209 void uncore_find_add_unit(struct intel_uncore_discovery_unit *node,
210 			  struct rb_root *root, u16 *num_units)
211 {
212 	struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id);
213 
214 	if (unit)
215 		node->pmu_idx = unit->pmu_idx;
216 	else if (num_units)
217 		node->pmu_idx = (*num_units)++;
218 
219 	rb_add(&node->node, root, unit_less);
220 }
221 
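/*
 * Validate a unit descriptor, record its control address for the given
 * die and add it to its type's unit tree.  The first unit of a type also
 * supplies the generic counter geometry (number of counters, width and
 * register offsets).
 */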
222 static void
223 uncore_insert_box_info(struct uncore_unit_discovery *unit,
224 		       int die)
225 {
226 	struct intel_uncore_discovery_unit *node;
227 	struct intel_uncore_discovery_type *type;
228 
229 	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
230 		pr_info("Invalid address detected for uncore type %d box %d, "
231 			"disabling the uncore unit.\n",
232 			unit->box_type, unit->box_id);
233 		return;
234 	}
235 
236 	node = kzalloc(sizeof(*node), GFP_KERNEL);
237 	if (!node)
238 		return;
239 
240 	node->die = die;
241 	node->id = unit->box_id;
242 	node->addr = unit->ctl;
243 
244 	type = get_uncore_discovery_type(unit);
245 	if (!type) {
246 		kfree(node);
247 		return;
248 	}
249 
250 	uncore_find_add_unit(node, &type->units, &type->num_units);
251 
252 	/* Store generic information for the first box */
253 	if (type->num_units == 1) {
254 		type->num_counters = unit->num_regs;
255 		type->counter_width = unit->bit_width;
256 		type->ctl_offset = unit->ctl_offset;
257 		type->ctr_offset = unit->ctr_offset;
258 	}
259 }
260 
261 static bool
262 uncore_ignore_unit(struct uncore_unit_discovery *unit, int *ignore)
263 {
264 	int i;
265 
266 	if (!ignore)
267 		return false;
268 
269 	for (i = 0; ignore[i] != UNCORE_IGNORE_END; i++) {
270 		if (unit->box_type == ignore[i])
271 			return true;
272 	}
273 
274 	return false;
275 }
276 
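/*
 * Parse one discovery table.  The BAR selected by @bar_offset points to
 * the table: entry 0 is the global descriptor, entries 1..max_units are
 * unit descriptors, each global.stride * 8 bytes apart.  The table is
 * mapped twice: first only the global header, to learn the table size,
 * then the whole table for the unit descriptors.
 */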
277 static int parse_discovery_table(struct pci_dev *dev, int die,
278 				 u32 bar_offset, bool *parsed,
279 				 int *ignore)
280 {
281 	struct uncore_global_discovery global;
282 	struct uncore_unit_discovery unit;
283 	void __iomem *io_addr;
284 	resource_size_t addr;
285 	unsigned long size;
286 	u32 val;
287 	int i;
288 
289 	pci_read_config_dword(dev, bar_offset, &val);
290 
291 	if (val & ~PCI_BASE_ADDRESS_MEM_MASK & ~PCI_BASE_ADDRESS_MEM_TYPE_64)
292 		return -EINVAL;
293 
294 	addr = (resource_size_t)(val & PCI_BASE_ADDRESS_MEM_MASK);
295 #ifdef CONFIG_PHYS_ADDR_T_64BIT
296 	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
297 		u32 val2;
298 
299 		pci_read_config_dword(dev, bar_offset + 4, &val2);
300 		addr |= ((resource_size_t)val2) << 32;
301 	}
302 #endif
303 	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
304 	io_addr = ioremap(addr, size);
305 	if (!io_addr)
306 		return -ENOMEM;
307 
308 	/* Read Global Discovery State */
309 	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
310 	if (uncore_discovery_invalid_unit(global)) {
311 		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
312 			global.table1, global.ctl, global.table3);
313 		iounmap(io_addr);
314 		return -EINVAL;
315 	}
316 	iounmap(io_addr);
317 
318 	size = (1 + global.max_units) * global.stride * 8;
319 	io_addr = ioremap(addr, size);
320 	if (!io_addr)
321 		return -ENOMEM;
322 
323 	/* Parse the Unit Discovery State */
324 	for (i = 0; i < global.max_units; i++) {
325 		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
326 			      sizeof(struct uncore_unit_discovery));
327 
328 		if (uncore_discovery_invalid_unit(unit))
329 			continue;
330 
331 		if (unit.access_type >= UNCORE_ACCESS_MAX)
332 			continue;
333 
334 		if (uncore_ignore_unit(&unit, ignore))
335 			continue;
336 
337 		uncore_insert_box_info(&unit, die);
338 	}
339 
340 	*parsed = true;
341 	iounmap(io_addr);
342 	return 0;
343 }
344 
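/*
 * Scan all Intel PCI devices (or only the dedicated discovery table
 * device, if one exists) for PMON discovery DVSEC entries, compute the
 * BAR offset from the BIR field and parse the table for the device's
 * die.  Returns false if no table could be parsed.
 */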
345 bool intel_uncore_has_discovery_tables(int *ignore)
346 {
347 	u32 device, val, entry_id, bar_offset;
348 	int die, dvsec = 0;
349 	struct pci_dev *dev = NULL;
350 	bool parsed = false, ret = true;
351 
352 	if (has_generic_discovery_table())
353 		device = UNCORE_DISCOVERY_TABLE_DEVICE;
354 	else
355 		device = PCI_ANY_ID;
356 
357 	/*
358 	 * Start a new search and iterate through the list of
359 	 * discovery table devices.
360 	 */
361 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
362 		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
363 			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
364 			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
365 			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
366 				continue;
367 
368 			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);
369 
370 			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
371 				ret = false;
372 				goto err;
373 			}
374 			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
375 				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;
376 
377 			die = get_device_die_id(dev);
378 			if (die < 0)
379 				continue;
380 
381 			parse_discovery_table(dev, die, bar_offset, &parsed, ignore);
382 		}
383 	}
384 
385 	/* None of the discovery tables are available */
386 	if (!parsed)
387 		ret = false;
388 err:
389 	pci_dev_put(dev);
390 
391 	return ret;
392 }
393 
394 void intel_uncore_clear_discovery_tables(void)
395 {
396 	struct intel_uncore_discovery_type *type, *next;
397 	struct intel_uncore_discovery_unit *pos;
398 	struct rb_node *node;
399 
400 	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
401 		while (!RB_EMPTY_ROOT(&type->units)) {
402 			node = rb_first(&type->units);
403 			pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
404 			rb_erase(node, &type->units);
405 			kfree(pos);
406 		}
407 		kfree(type);
408 	}
409 }
410 
411 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
412 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
413 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
414 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
415 DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");
416 
417 static struct attribute *generic_uncore_formats_attr[] = {
418 	&format_attr_event.attr,
419 	&format_attr_umask.attr,
420 	&format_attr_edge.attr,
421 	&format_attr_inv.attr,
422 	&format_attr_thresh.attr,
423 	NULL,
424 };
425 
426 static const struct attribute_group generic_uncore_format_group = {
427 	.name = "format",
428 	.attrs = generic_uncore_formats_attr,
429 };
430 
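/*
 * Generic MSR access: the box control MSR comes straight from the
 * discovery data (unit->addr).  Writing GENERIC_PMON_BOX_CTL_INT
 * initializes the box, GENERIC_PMON_BOX_CTL_FRZ freezes its counters
 * and writing 0 unfreezes them.  Event control registers are programmed
 * directly via hwc->config_base.
 */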
431 static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box)
432 {
433 	struct intel_uncore_discovery_unit *unit;
434 
435 	unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes,
436 						-1, box->pmu->pmu_idx);
437 	if (WARN_ON_ONCE(!unit))
438 		return 0;
439 
440 	return unit->addr;
441 }
442 
443 void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
444 {
445 	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
446 }
447 
448 void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
449 {
450 	wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
451 }
452 
453 void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
454 {
455 	wrmsrq(intel_generic_uncore_box_ctl(box), 0);
456 }
457 
458 static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
459 					    struct perf_event *event)
460 {
461 	struct hw_perf_event *hwc = &event->hw;
462 
463 	wrmsrq(hwc->config_base, hwc->config);
464 }
465 
466 static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
467 					     struct perf_event *event)
468 {
469 	struct hw_perf_event *hwc = &event->hw;
470 
471 	wrmsrq(hwc->config_base, 0);
472 }
473 
474 static struct intel_uncore_ops generic_uncore_msr_ops = {
475 	.init_box		= intel_generic_uncore_msr_init_box,
476 	.disable_box		= intel_generic_uncore_msr_disable_box,
477 	.enable_box		= intel_generic_uncore_msr_enable_box,
478 	.disable_event		= intel_generic_uncore_msr_disable_event,
479 	.enable_event		= intel_generic_uncore_msr_enable_event,
480 	.read_counter		= uncore_msr_read_counter,
481 };
482 
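/*
 * Compute the event control/counter register addresses for a counter
 * index.  MMIO boxes use offsets relative to the mapped region, PCI
 * boxes use config space offsets relative to the box control register,
 * and MSR boxes use absolute MSR numbers (box control + offset +
 * counter index).
 */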
483 bool intel_generic_uncore_assign_hw_event(struct perf_event *event,
484 					  struct intel_uncore_box *box)
485 {
486 	struct hw_perf_event *hwc = &event->hw;
487 	u64 box_ctl;
488 
489 	if (!box->pmu->type->boxes)
490 		return false;
491 
492 	if (box->io_addr) {
493 		hwc->config_base = uncore_pci_event_ctl(box, hwc->idx);
494 		hwc->event_base  = uncore_pci_perf_ctr(box, hwc->idx);
495 		return true;
496 	}
497 
498 	box_ctl = intel_generic_uncore_box_ctl(box);
499 	if (!box_ctl)
500 		return false;
501 
502 	if (box->pci_dev) {
503 		box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl);
504 		hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx);
505 		hwc->event_base  = box_ctl + uncore_pci_perf_ctr(box, hwc->idx);
506 		return true;
507 	}
508 
509 	hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx;
510 	hwc->event_base  = box_ctl + box->pmu->type->perf_ctr + hwc->idx;
511 
512 	return true;
513 }
514 
515 static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box)
516 {
517 	return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box));
518 }
519 
520 void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
521 {
522 	struct pci_dev *pdev = box->pci_dev;
523 	int box_ctl = intel_pci_uncore_box_ctl(box);
524 
525 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
526 	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
527 }
528 
529 void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
530 {
531 	struct pci_dev *pdev = box->pci_dev;
532 	int box_ctl = intel_pci_uncore_box_ctl(box);
533 
534 	pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
535 }
536 
537 void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
538 {
539 	struct pci_dev *pdev = box->pci_dev;
540 	int box_ctl = intel_pci_uncore_box_ctl(box);
541 
542 	pci_write_config_dword(pdev, box_ctl, 0);
543 }
544 
545 static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
546 					    struct perf_event *event)
547 {
548 	struct pci_dev *pdev = box->pci_dev;
549 	struct hw_perf_event *hwc = &event->hw;
550 
551 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
552 }
553 
554 void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
555 					    struct perf_event *event)
556 {
557 	struct pci_dev *pdev = box->pci_dev;
558 	struct hw_perf_event *hwc = &event->hw;
559 
560 	pci_write_config_dword(pdev, hwc->config_base, 0);
561 }
562 
563 u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
564 					  struct perf_event *event)
565 {
566 	struct pci_dev *pdev = box->pci_dev;
567 	struct hw_perf_event *hwc = &event->hw;
568 	u64 count = 0;
569 
570 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
571 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
572 
573 	return count;
574 }
575 
576 static struct intel_uncore_ops generic_uncore_pci_ops = {
577 	.init_box	= intel_generic_uncore_pci_init_box,
578 	.disable_box	= intel_generic_uncore_pci_disable_box,
579 	.enable_box	= intel_generic_uncore_pci_enable_box,
580 	.disable_event	= intel_generic_uncore_pci_disable_event,
581 	.enable_event	= intel_generic_uncore_pci_enable_event,
582 	.read_counter	= intel_generic_uncore_pci_read_counter,
583 };
584 
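/*
 * Generic MMIO access: map UNCORE_GENERIC_MMIO_SIZE bytes at the unit's
 * control address and access the PMON registers at offsets relative to
 * that mapping.  The 16KB window is assumed to cover all registers of a
 * single box.
 */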
585 #define UNCORE_GENERIC_MMIO_SIZE		0x4000
586 
587 void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
588 {
589 	struct intel_uncore_discovery_unit *unit;
590 	struct intel_uncore_type *type = box->pmu->type;
591 	resource_size_t addr;
592 
593 	unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx);
594 	if (!unit) {
595 		pr_warn("Uncore type %d id %d: Cannot find box control address.\n",
596 			type->type_id, box->pmu->pmu_idx);
597 		return;
598 	}
599 
600 	if (!unit->addr) {
601 		pr_warn("Uncore type %d box %d: Invalid box control address.\n",
602 			type->type_id, unit->id);
603 		return;
604 	}
605 
606 	addr = unit->addr;
607 	box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
608 	if (!box->io_addr) {
609 		pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
610 			type->type_id, unit->id, (unsigned long long)addr);
611 		return;
612 	}
613 
614 	writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
615 }
616 
617 void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
618 {
619 	if (!box->io_addr)
620 		return;
621 
622 	writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
623 }
624 
625 void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
626 {
627 	if (!box->io_addr)
628 		return;
629 
630 	writel(0, box->io_addr);
631 }
632 
633 void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
634 					    struct perf_event *event)
635 {
636 	struct hw_perf_event *hwc = &event->hw;
637 
638 	if (!box->io_addr)
639 		return;
640 
641 	writel(hwc->config, box->io_addr + hwc->config_base);
642 }
643 
644 void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
645 					     struct perf_event *event)
646 {
647 	struct hw_perf_event *hwc = &event->hw;
648 
649 	if (!box->io_addr)
650 		return;
651 
652 	writel(0, box->io_addr + hwc->config_base);
653 }
654 
655 static struct intel_uncore_ops generic_uncore_mmio_ops = {
656 	.init_box	= intel_generic_uncore_mmio_init_box,
657 	.exit_box	= uncore_mmio_exit_box,
658 	.disable_box	= intel_generic_uncore_mmio_disable_box,
659 	.enable_box	= intel_generic_uncore_mmio_enable_box,
660 	.disable_event	= intel_generic_uncore_mmio_disable_event,
661 	.enable_event	= intel_generic_uncore_mmio_enable_event,
662 	.read_counter	= uncore_mmio_read_counter,
663 };
664 
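/*
 * Populate a generic uncore type from the discovery data and hook up the
 * access-method specific ops.  MMIO types also record the size of the
 * region to map.
 */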
665 static bool uncore_update_uncore_type(enum uncore_access_type type_id,
666 				      struct intel_uncore_type *uncore,
667 				      struct intel_uncore_discovery_type *type)
668 {
669 	uncore->type_id = type->type;
670 	uncore->num_counters = type->num_counters;
671 	uncore->perf_ctr_bits = type->counter_width;
672 	uncore->perf_ctr = (unsigned int)type->ctr_offset;
673 	uncore->event_ctl = (unsigned int)type->ctl_offset;
674 	uncore->boxes = &type->units;
675 	uncore->num_boxes = type->num_units;
676 
677 	switch (type_id) {
678 	case UNCORE_ACCESS_MSR:
679 		uncore->ops = &generic_uncore_msr_ops;
680 		break;
681 	case UNCORE_ACCESS_PCI:
682 		uncore->ops = &generic_uncore_pci_ops;
683 		break;
684 	case UNCORE_ACCESS_MMIO:
685 		uncore->ops = &generic_uncore_mmio_ops;
686 		uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
687 		break;
688 	default:
689 		return false;
690 	}
691 
692 	return true;
693 }
694 
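/*
 * Build a NULL-terminated array of generic uncore types for the given
 * access method.  @num_extra reserves additional slots, presumably so
 * that platform code can append its own types to the array.
 */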
695 struct intel_uncore_type **
696 intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
697 {
698 	struct intel_uncore_discovery_type *type;
699 	struct intel_uncore_type **uncores;
700 	struct intel_uncore_type *uncore;
701 	struct rb_node *node;
702 	int i = 0;
703 
704 	uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
705 			  sizeof(struct intel_uncore_type *), GFP_KERNEL);
706 	if (!uncores)
707 		return empty_uncore;
708 
709 	for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
710 		type = rb_entry(node, struct intel_uncore_discovery_type, node);
711 		if (type->access_type != type_id)
712 			continue;
713 
714 		uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
715 		if (!uncore)
716 			break;
717 
718 		uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
719 		uncore->format_group = &generic_uncore_format_group;
720 
721 		if (!uncore_update_uncore_type(type_id, uncore, type)) {
722 			kfree(uncore);
723 			continue;
724 		}
725 		uncores[i++] = uncore;
726 	}
727 
728 	return uncores;
729 }
730 
731 void intel_uncore_generic_uncore_cpu_init(void)
732 {
733 	uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
734 }
735 
736 int intel_uncore_generic_uncore_pci_init(void)
737 {
738 	uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);
739 
740 	return 0;
741 }
742 
743 void intel_uncore_generic_uncore_mmio_init(void)
744 {
745 	uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
746 }
747