xref: /freebsd/sys/x86/iommu/amd_event.c (revision 0f5116d7efe33c81f0b24b56eec78af37898f500)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/pci_cfgreg.h>
#include "pcib_if.h"
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <x86/apicreg.h>
#include <x86/apicvar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

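/*
 * Acknowledge the pending event log interrupt: the status register
 * bits are write-one-to-clear, so writing EVLOGINT back re-arms the
 * unit to deliver the next event log interrupt.
 */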
static void
amdiommu_event_rearm_intr(struct amdiommu_unit *unit)
{
	amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
	    AMDIOMMU_CMDEVS_EVLOGINT);
}

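/*
 * Advance the software head of the in-memory event log ring, wrapping
 * around at the configured log size.
 */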
static void
amdiommu_event_log_inc_head(struct amdiommu_unit *unit)
{
	unit->event_log_head++;
	if (unit->event_log_head >= unit->event_log_size)
		unit->event_log_head = 0;
}

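/*
 * Print an event log entry in raw form.  When fancy is true, also
 * decode illegal device table entry and I/O page fault events; the
 * decoding looks up the device table and contexts and therefore
 * requires the unit lock.
 */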
static void
amdiommu_event_log_print(struct amdiommu_unit *unit,
    const struct amdiommu_event_generic *evp, bool fancy)
{
	printf("amdiommu%d: event type 0x%x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    unit->iommu.unit, evp->code, evp->w0, evp->w1, evp->w2, evp->w3);
	if (!fancy)
		return;

	AMDIOMMU_ASSERT_LOCKED(unit);
	if (evp->code == AMDIOMMU_EV_ILL_DEV_TABLE_ENTRY) {
		const struct amdiommu_event_ill_dev_table_entry *ev_dte_p;
		const struct amdiommu_dte *dte;
		const uint32_t *x;
		int i;

		ev_dte_p = (const struct
		    amdiommu_event_ill_dev_table_entry *)evp;
		dte = &unit->dev_tbl[ev_dte_p->devid];

		printf("\tIllegal Dev Tab Entry dte@%p:", dte);
		for (i = 0, x = (const uint32_t *)dte; i < sizeof(*dte) /
		    sizeof(uint32_t); i++, x++)
			printf(" 0x%08x", *x);
		printf("\n");
	} else if (evp->code == AMDIOMMU_EV_IO_PAGE_FAULT) {
		const struct amdiommu_event_io_page_fault_entry *ev_iopf_p;
		struct amdiommu_ctx *ctx;
		device_t dev;

		ev_iopf_p = (const struct
		    amdiommu_event_io_page_fault_entry *)evp;
		printf("\tPage Fault rid %#x dom %d",
		    ev_iopf_p->devid, ev_iopf_p->pasid);
		ctx = amdiommu_find_ctx_locked(unit, ev_iopf_p->devid);
		if (ctx != NULL) {
			dev = ctx->context.tag->owner;
			if (dev != NULL)
				printf(" %s", device_get_nameunit(dev));
		}
		printf("\n\t"
		    "gn %d nx %d us %d i %d pr %d rw %d pe %d rz %d tr %d"
		    "\n\tgaddr %#jx\n",
		    ev_iopf_p->gn, ev_iopf_p->nx, ev_iopf_p->us, ev_iopf_p->i,
		    ev_iopf_p->pr, ev_iopf_p->rw, ev_iopf_p->pe, ev_iopf_p->rz,
		    ev_iopf_p->tr,
		    (((uintmax_t)(ev_iopf_p->addr2)) << 32) |
		    ev_iopf_p->addr1);
	}
}

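/*
 * Return the hardware tail of the event log, converted from the
 * register byte offset to an entry index.
 */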
static u_int
amdiommu_event_log_tail(struct amdiommu_unit *unit)
{
	return (amdiommu_read8(unit, AMDIOMMU_EVNTLOG_TAIL) >>
	    AMDIOMMU_EV_SZ_SHIFT);
}

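/*
 * Advance an index into the intermediate event copy ring, wrapping
 * around at the end of the ring.
 */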
static u_int
amdiommu_event_copy_log_inc(u_int idx)
{
	idx++;
	if (idx == nitems(((struct amdiommu_unit *)NULL)->event_copy_log))
		idx = 0;
	return (idx);
}

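/*
 * Check whether the event copy ring has room for one more entry.
 */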
static bool
amdiommu_event_copy_log_hasspace(struct amdiommu_unit *unit)
{
	return (unit->event_copy_tail != amdiommu_event_copy_log_inc(
	    unit->event_copy_head));
}

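/*
 * Interrupt handler.  Drain the hardware event log into the copy ring,
 * from which the taskqueue prints the entries with full decoding; if
 * the copy ring is full, print the raw entry directly from interrupt
 * context rather than lose it.
 */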
void
amdiommu_event_intr(struct amdiommu_unit *unit, uint64_t status)
{
	struct amdiommu_event_generic *evp;
	u_int hw_tail, hw_tail1;
	bool enqueue;

	enqueue = (status & AMDIOMMU_CMDEVS_EVOVRFLW) != 0;

	hw_tail1 = amdiommu_event_log_tail(unit);
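	/*
	 * Loop until the hardware tail stops moving, since new events
	 * may be logged while the ring is drained.
	 */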
	do {
		hw_tail = hw_tail1;
		for (; hw_tail != unit->event_log_head;
		     amdiommu_event_log_inc_head(unit)) {
			evp = &unit->event_log[unit->event_log_head];
			mtx_lock_spin(&unit->event_lock);
			if (amdiommu_event_copy_log_hasspace(unit)) {
				unit->event_copy_log[unit->event_copy_head] =
				    *evp;
				unit->event_copy_head =
				    amdiommu_event_copy_log_inc(unit->
				    event_copy_head);
				enqueue = true;
			} else {
				amdiommu_event_log_print(unit, evp, false);
			}
			mtx_unlock_spin(&unit->event_lock);
		}
		amdiommu_write8(unit, AMDIOMMU_EVNTLOG_HEAD,
		    unit->event_log_head << AMDIOMMU_EV_SZ_SHIFT);
		hw_tail1 = amdiommu_event_log_tail(unit);
	} while (hw_tail1 != hw_tail);
	amdiommu_event_rearm_intr(unit);

	if (enqueue)
		taskqueue_enqueue(unit->event_taskqueue, &unit->event_task);
}

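/*
 * Taskqueue handler.  Report the hardware event register when the
 * feature is present, recover from event log overflow, and print the
 * entries queued in the copy ring by the interrupt handler.
 */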
static void
amdiommu_event_task(void *arg, int pending __unused)
{
	struct amdiommu_unit *unit;
	uint64_t hwev_status, status;
	struct amdiommu_event_generic hwev;

	unit = arg;
	AMDIOMMU_LOCK(unit);

	if ((unit->efr & AMDIOMMU_EFR_HWEV_SUP) != 0) {
		hwev_status = amdiommu_read8(unit, AMDIOMMU_HWEV_STATUS);
		if ((hwev_status & AMDIOMMU_HWEVS_HEV) != 0) {
			*(uint64_t *)&hwev = amdiommu_read8(unit,
			    AMDIOMMU_HWEV_LOWER);
			*((uint64_t *)&hwev + 1) = amdiommu_read8(unit,
			    AMDIOMMU_HWEV_UPPER);
			printf("amdiommu%d: hw event%s\n", unit->iommu.unit,
			    (hwev_status & AMDIOMMU_HWEVS_HEO) != 0 ?
			    " (overflowed)" : "");
			amdiommu_event_log_print(unit, &hwev, true);
			amdiommu_write8(unit, AMDIOMMU_HWEV_STATUS,
			    hwev_status);
		}
	}

	status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
	if ((status & AMDIOMMU_CMDEVS_EVOVRFLW) != 0) {
		printf("amdiommu%d: event log overflow\n", unit->iommu.unit);

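		/*
		 * Recover from the overflow: wait for the event log
		 * hardware to go idle, disable logging, reset the head
		 * pointer, clear the overflow bit (write-one-to-clear),
		 * then re-enable logging and re-arm the interrupt.
		 */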
		while ((status & AMDIOMMU_CMDEVS_EVLOGRUN) != 0) {
			DELAY(1);
			status = amdiommu_read8(unit, AMDIOMMU_CMDEV_STATUS);
		}

		unit->hw_ctrl &= ~AMDIOMMU_CTRL_EVNTLOG_EN;
		amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);

		unit->event_log_head = 0;
		amdiommu_write8(unit, AMDIOMMU_EVNTLOG_HEAD, 0);

		amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
		    AMDIOMMU_CMDEVS_EVOVRFLW);		/* RW1C */

		unit->hw_ctrl |= AMDIOMMU_CTRL_EVNTLOG_EN;
		amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);

		amdiommu_event_rearm_intr(unit);
	}

	mtx_lock_spin(&unit->event_lock);
	while (unit->event_copy_head != unit->event_copy_tail) {
		mtx_unlock_spin(&unit->event_lock);
		amdiommu_event_log_print(unit, &unit->event_copy_log[
		    unit->event_copy_tail], true);
		mtx_lock_spin(&unit->event_lock);
		unit->event_copy_tail = amdiommu_event_copy_log_inc(unit->
		    event_copy_tail);
	}
	mtx_unlock_spin(&unit->event_lock);

	AMDIOMMU_UNLOCK(unit);
}

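/*
 * Allocate the event log and the taskqueue that drains the copy ring,
 * then program the log base register and enable event logging together
 * with its interrupt.
 */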
int
amdiommu_init_event(struct amdiommu_unit *unit)
{
	uint64_t base_reg;

	mtx_init(&unit->event_lock, "amdevl", NULL, MTX_SPIN);

	/* The event log size tunable is expressed in log entries. */
	unit->event_log_size = AMDIOMMU_EVNTLOG_MIN;
	TUNABLE_INT_FETCH("hw.amdiommu.event_log_size", &unit->event_log_size);
	if (unit->event_log_size < AMDIOMMU_EVNTLOG_MIN ||
	    unit->event_log_size > AMDIOMMU_EVNTLOG_MAX ||
	    !powerof2(unit->event_log_size))
		panic("invalid hw.amdiommu.event_log_size");
	unit->event_log = kmem_alloc_contig(AMDIOMMU_EV_SZ *
	    unit->event_log_size, M_WAITOK | M_ZERO, 0, ~0ull, PAGE_SIZE,
	    0, VM_MEMATTR_DEFAULT);

	TASK_INIT(&unit->event_task, 0, amdiommu_event_task, unit);
	unit->event_taskqueue = taskqueue_create_fast("amdiommuff", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->event_taskqueue);
	taskqueue_start_threads(&unit->event_taskqueue, 1, PI_AV,
	    "amdiommu%d event taskq", unit->iommu.unit);

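	/*
	 * The size field of the base register is logarithmic, with the
	 * value 0x8 denoting AMDIOMMU_EVNTLOG_MIN entries.
	 */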
	base_reg = pmap_kextract((vm_offset_t)unit->event_log) |
	    (((uint64_t)0x8 + ilog2(unit->event_log_size /
	    AMDIOMMU_EVNTLOG_MIN)) << AMDIOMMU_EVNTLOG_BASE_SZSHIFT);
	AMDIOMMU_LOCK(unit);
	/*
	 * Re-arm before enabling the interrupt, to not lose it when
	 * re-arming in the interrupt handler.
	 */
	amdiommu_event_rearm_intr(unit);
	amdiommu_write8(unit, AMDIOMMU_EVNTLOG_BASE, base_reg);
	unit->hw_ctrl |= AMDIOMMU_CTRL_EVNTLOG_EN | AMDIOMMU_CTRL_EVENTINT_EN;
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
	AMDIOMMU_UNLOCK(unit);

	return (0);
}

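/*
 * Disable event logging and the event interrupt, then free the
 * taskqueue and the event log.
 */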
void
amdiommu_fini_event(struct amdiommu_unit *unit)
{
	AMDIOMMU_LOCK(unit);
	unit->hw_ctrl &= ~(AMDIOMMU_CTRL_EVNTLOG_EN |
	    AMDIOMMU_CTRL_EVENTINT_EN);
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
	amdiommu_write8(unit, AMDIOMMU_EVNTLOG_BASE, 0);
	AMDIOMMU_UNLOCK(unit);

	taskqueue_drain(unit->event_taskqueue, &unit->event_task);
	taskqueue_free(unit->event_taskqueue);
	unit->event_taskqueue = NULL;

	kmem_free(unit->event_log, unit->event_log_size * AMDIOMMU_EV_SZ);
	unit->event_log = NULL;
	unit->event_log_head = unit->event_log_tail = 0;

	mtx_destroy(&unit->event_lock);
}
324