/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"

#define COUNTER_SHIFT		16

#define _GET_BANK(ev)       ((u8)(ev->hw.extra_reg.reg >> 8))
#define _GET_CNTR(ev)       ((u8)(ev->hw.extra_reg.reg))

/* iommu pmu config masks */
#define _GET_CSOURCE(ev)    ((ev->hw.config & 0xFFULL))
#define _GET_DEVID(ev)      ((ev->hw.config >> 8)  & 0xFFFFULL)
#define _GET_PASID(ev)      ((ev->hw.config >> 24) & 0xFFFFULL)
#define _GET_DOMID(ev)      ((ev->hw.config >> 40) & 0xFFFFULL)
#define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config)  & 0xFFFFULL)
#define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL)
#define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL)
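
/*
 * Worked example of the config layout above (illustrative values): an event
 * with csource = 0x05 (mem_trans_total), devid = 0x0100 (PCI 01:00.0) and
 * domid = 0x0002 is encoded as
 *
 *	config  = 0x05 | (0x0100ULL << 8) | (0x0002ULL << 40)
 *		= 0x020000010005ULL
 *
 * and a devid_mask of 0xffff goes into the low 16 bits of config1.
 */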

static struct perf_amd_iommu __perf_iommu;

struct perf_amd_iommu {
	struct pmu pmu;
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
	const struct attribute_group *attr_groups[4];
};

#define format_group	attr_groups[0]
#define cpumask_group	attr_groups[1]
#define events_group	attr_groups[2]
#define null_group	attr_groups[3]

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(pasid,      "config:24-39");
PMU_FORMAT_ATTR(domid,      "config:40-55");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(pasid_mask, "config1:16-31");
PMU_FORMAT_ATTR(domid_mask, "config1:32-47");

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};
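
/*
 * With the format directory above, the perf tool can assemble raw events
 * from these fields by name. A sketch (illustrative; the exact event-string
 * syntax depends on the perf tool version):
 *
 *	perf stat -e 'amd_iommu/csource=0x05,devid=0x0100,devid_mask=0xffff/' \
 *		-a -- sleep 1
 */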

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,        "csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,       "csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,           "csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,        "csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,         "csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,   "csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,   "csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,   "csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,   "csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,             "csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,             "csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,       "csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,       "csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,       "csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,             "csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,             "csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,           "csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,       "csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,                 "csource=0x13"),
	{ /* end: all zeroes */ },
};
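
/*
 * The named events resolve to the csource encodings above, so the two
 * invocations below should count the same thing (illustrative):
 *
 *	perf stat -e amd_iommu/mem_trans_total/ -a -- sleep 1
 *	perf stat -e 'amd_iommu/csource=0x05/' -a -- sleep 1
 */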

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

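/*
 * Bank/counter assignments are tracked in cntr_assign_mask, packed four
 * counters per bank: bit (bank * 4 + cntr) is set while that counter is in
 * use, so e.g. bank 1 / counter 2 maps to bit 6. The chosen pair is returned
 * encoded as (bank << 8) | cntr, which is also how it is stashed in
 * event->hw.extra_reg.reg for _GET_BANK()/_GET_CNTR().
 */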
static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
{
	unsigned long flags;
	int shift, bank, cntr, retval;
	int max_banks = perf_iommu->max_banks;
	int max_cntrs = perf_iommu->max_counters;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);

	for (bank = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = (bank * 4) + cntr;
			if (perf_iommu->cntr_assign_mask & (1ULL << shift))
				continue;
			perf_iommu->cntr_assign_mask |= (1ULL << shift);
			retval = (bank << 8) | cntr;
			goto out;
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
					u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	/* Valid indices run 0 .. max - 1, so reject an index equal to the max. */
	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = (bank * 4) + cntr;

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL << shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu;
	u64 config, config1;

	/* Test the event attr type for PMU enumeration. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores, so they support
	 * neither per-process (per-task) counting nor event sampling.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* IOMMU counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	perf_iommu = &__perf_iommu;

	if (event->pmu != &perf_iommu->pmu)
		return -ENOENT;

	/* perf_iommu points at the static __perf_iommu, so it cannot be NULL. */
	config = event->attr.config;
	config1 = event->attr.config1;

	/* integrate with iommu base devid (0000), assume one iommu */
	perf_iommu->max_banks =
		amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID);
	perf_iommu->max_counters =
		amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID);
	if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0))
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->config = config;
	hwc->extra_reg.config = config1;

	return 0;
}
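
/*
 * Userspace sketch (illustrative, not part of this driver): opening a
 * counting event on this PMU with perf_event_open(2). The dynamic PMU type
 * must first be read from /sys/bus/event_source/devices/amd_iommu/type, and
 * the event must be opened per-CPU (cpu >= 0, pid == -1), matching the
 * checks in perf_iommu_event_init() above.
 *
 *	struct perf_event_attr attr = {};
 *
 *	attr.type   = amd_iommu_pmu_type;  // value read from the sysfs "type" file
 *	attr.size   = sizeof(attr);
 *	attr.config = 0x05;                // csource: mem_trans_total
 *	fd = syscall(__NR_perf_event_open, &attr,
 *		     -1, 0, -1, 0);        // pid=-1, cpu=0, group=-1, flags=0
 */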

static void perf_iommu_enable_event(struct perf_event *ev)
{
	u8 csource = _GET_CSOURCE(ev);
	u16 devid = _GET_DEVID(ev);
	u64 reg = 0ULL;

	/* Select the counter source, i.e. which event this counter counts. */
	reg = csource;
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_COUNTER_SRC_REG, &reg, true);

	/*
	 * Each match register is programmed with the match value in the low
	 * bits and its mask in bits 63:32; bit 31 enables that qualifier.
	 */
	reg = devid | (_GET_DEVID_MASK(ev) << 32);
	if (reg)
		reg |= (1ULL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_DEVID_MATCH_REG, &reg, true);

	reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
	if (reg)
		reg |= (1ULL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_PASID_MATCH_REG, &reg, true);

	reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
	if (reg)
		reg |= (1ULL << 31);
	amd_iommu_pc_get_set_reg_val(devid,
			_GET_BANK(ev), _GET_CNTR(ev),
			IOMMU_PC_DOMID_MATCH_REG, &reg, true);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	u64 reg = 0ULL;

	/* Writing a zero counter source stops this counter from counting. */
	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
			_GET_BANK(event), _GET_CNTR(event),
			IOMMU_PC_COUNTER_SRC_REG, &reg, true);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_start\n");
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);

		/* Reload the hardware counter with the last saved value. */
		amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
				_GET_BANK(event), _GET_CNTR(event),
				IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count = 0ULL;
	u64 prev_raw_count = 0ULL;
	u64 delta = 0ULL;
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_read\n");

	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
				_GET_BANK(event), _GET_CNTR(event),
				IOMMU_PC_COUNTER_REG, &count, false);

	/* IOMMU pc counter register is only 48 bits */
	count &= 0xFFFFFFFFFFFFULL;

	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					count) != prev_raw_count)
		return;

	/*
	 * Handle 48-bit counter overflow: shifting both values up by
	 * COUNTER_SHIFT (16) aligns the 48-bit wraparound with the 64-bit
	 * one, so the unsigned subtraction yields the correct delta even
	 * when the counter has wrapped; shift back down afterwards.
	 */
	delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
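
/*
 * Worked example of the wraparound arithmetic above (illustrative numbers):
 * with prev_raw_count = 0xffffffffff00 and a fresh reading of 0x100, the
 * 48-bit counter has wrapped. Then
 *
 *	((0x100 << 16) - (0xffffffffff00 << 16)) >> 16 = 0x200
 *
 * in 64-bit unsigned arithmetic, i.e. the 512 events that occurred across
 * the wrap.
 */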

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	pr_debug("perf: amd_iommu:perf_iommu_stop\n");

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	/* The early return above guarantees a final count update is needed. */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	pr_debug("perf: amd_iommu:perf_iommu_add\n");
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(perf_iommu);
	if (retval < 0)
		return retval;
	event->hw.extra_reg.reg = (u16)retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	pr_debug("perf: amd_iommu:perf_iommu_del\n");
	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				     _GET_BANK(event),
				     _GET_CNTR(event));

	perf_event_update_userpage(event);
}

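/*
 * The events attribute group and its NULL-terminated attribute pointer
 * array below are carved out of one allocation: the array of (i + 1)
 * pointers lives immediately after the attribute_group struct, so the
 * single kfree() in amd_iommu_pc_exit() releases both.
 */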
static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
{
	struct attribute **attrs;
	struct attribute_group *attr_group;
	int i = 0, j;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attr_group = kzalloc(sizeof(struct attribute *)
		* (i + 1) + sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group)
		return -ENOMEM;

	attrs = (struct attribute **)(attr_group + 1);
	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	attr_group->name = "events";
	attr_group->attrs = attrs;
	perf_iommu->events_group = attr_group;

	return 0;
}

static __init void amd_iommu_pc_exit(void)
{
	/* kfree(NULL) is a no-op, so no NULL check is needed. */
	kfree(__perf_iommu.events_group);
	__perf_iommu.events_group = NULL;
}

static __init int _init_perf_amd_iommu(
	struct perf_amd_iommu *perf_iommu, char *name)
{
	int ret;

	raw_spin_lock_init(&perf_iommu->lock);

	/* Init format attributes */
	perf_iommu->format_group = &amd_iommu_format_group;

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	perf_iommu->cpumask_group = &amd_iommu_cpumask_group;

	/* Init events attributes */
	if (_init_events_attrs(perf_iommu) != 0)
		pr_err("perf: amd_iommu: Only raw events are supported.\n");

	/* Init null attributes */
	perf_iommu->null_group = NULL;
	perf_iommu->pmu.attr_groups = perf_iommu->attr_groups;

	ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
	if (ret) {
		pr_err("perf: amd_iommu: Failed to initialize.\n");
		amd_iommu_pc_exit();
	} else {
		pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n",
			amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID),
			amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID));
	}

	return ret;
}

static struct perf_amd_iommu __perf_iommu = {
	.pmu = {
		.event_init	= perf_iommu_event_init,
		.add		= perf_iommu_add,
		.del		= perf_iommu_del,
		.start		= perf_iommu_start,
		.stop		= perf_iommu_stop,
		.read		= perf_iommu_read,
	},
	.max_banks		= 0x00,
	.max_counters		= 0x00,
	.cntr_assign_mask	= 0ULL,
	.format_group		= NULL,
	.cpumask_group		= NULL,
	.events_group		= NULL,
	.null_group		= NULL,
};

static __init int amd_iommu_pc_init(void)
{
	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	/* Propagate any registration failure instead of discarding it. */
	return _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
}

device_initcall(amd_iommu_pc_init);