xref: /freebsd/sys/x86/iommu/intel_fault.c (revision 105fd928b0b5b35ab529e5f6914788dc49582901)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <machine/bus.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_dmar.h>

/*
 * Fault interrupt handling for DMARs.  If advanced fault logging is
 * not implemented by the hardware, the code emulates it.  The fast
 * interrupt handler flushes the fault recording registers into the
 * circular buffer at unit->fault_log and schedules a task.
 *
 * A fast handler is used since faults usually come in bursts and the
 * number of fault recording registers is limited, e.g. down to one on
 * the 5400 MCH.  We try to reduce the latency of clearing the fault
 * register file.  The task is usually long-running, since printf() is
 * slow, but this is not problematic because bursts are rare.
 *
 * For the same reason, each translation unit's task is executed in
 * its own thread.
 *
 * XXXKIB It seems there is no hardware available which implements
 * advanced fault logging, so the code to handle AFL is not written.
 */

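/*
 * Advance an index into the software fault log by one record (two
 * 64-bit words), wrapping around at the end of the buffer.
 */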
static int
dmar_fault_next(struct dmar_unit *unit, int faultp)
{

	faultp += 2;
	if (faultp == unit->fault_log_size)
		faultp = 0;
	return (faultp);
}

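/*
 * Report and acknowledge the error conditions flagged in the Fault
 * Status register (invalidation timeout, completion and queue errors,
 * advanced fault events) by writing the handled bits back.
 */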
static void
dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts)
{
	uint32_t clear;

	clear = 0;
	if ((fsts & DMAR_FSTS_ITE) != 0) {
		printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit);
		clear |= DMAR_FSTS_ITE;
	}
	if ((fsts & DMAR_FSTS_ICE) != 0) {
		printf("DMAR%d: Invalidation completion error\n",
		    unit->iommu.unit);
		clear |= DMAR_FSTS_ICE;
	}
	if ((fsts & DMAR_FSTS_IQE) != 0) {
		printf("DMAR%d: Invalidation queue error\n",
		    unit->iommu.unit);
		clear |= DMAR_FSTS_IQE;
	}
	if ((fsts & DMAR_FSTS_APF) != 0) {
		printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit);
		clear |= DMAR_FSTS_APF;
	}
	if ((fsts & DMAR_FSTS_AFO) != 0) {
		printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit);
		clear |= DMAR_FSTS_AFO;
	}
	if (clear != 0)
		dmar_write4(unit, DMAR_FSTS_REG, clear);
}

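/*
 * Fast interrupt handler (filter) for DMAR fault events.  Drains the
 * hardware fault recording registers into the per-unit circular log,
 * clears each hardware record, and schedules the logging task if
 * anything was captured.
 */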
int
dmar_fault_intr(void *arg)
{
	struct dmar_unit *unit;
	uint64_t fault_rec[2];
	uint32_t fsts;
	int fri, frir, faultp;
	bool enqueue;

	unit = arg;
	enqueue = false;
	fsts = dmar_read4(unit, DMAR_FSTS_REG);
	dmar_fault_intr_clear(unit, fsts);

	if ((fsts & DMAR_FSTS_PPF) == 0)
		goto done;

	fri = DMAR_FSTS_FRI(fsts);
	for (;;) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + fri) * 16;
		fault_rec[1] = dmar_read8(unit, frir + 8);
		if ((fault_rec[1] & DMAR_FRCD2_F) == 0)
			break;
		fault_rec[0] = dmar_read8(unit, frir);
		dmar_write4(unit, frir + 12, DMAR_FRCD2_F32);
		DMAR_FAULT_LOCK(unit);
		faultp = unit->fault_log_head;
		if (dmar_fault_next(unit, faultp) == unit->fault_log_tail) {
			/* XXXKIB log overflow */
		} else {
			unit->fault_log[faultp] = fault_rec[0];
			unit->fault_log[faultp + 1] = fault_rec[1];
			unit->fault_log_head = dmar_fault_next(unit, faultp);
			enqueue = true;
		}
		DMAR_FAULT_UNLOCK(unit);
		fri += 1;
		if (fri >= DMAR_CAP_NFR(unit->hw_cap))
			fri = 0;
	}

done:
	/*
	 * On SandyBridge, due to errata BJ124, IvyBridge errata
	 * BV100, and Haswell errata HSD40, "Spurious Intel VT-d
	 * Interrupts May Occur When the PFO Bit is Set".  Handle the
	 * cases by clearing the overflow bit even if no fault is
	 * reported.
	 *
	 * On IvyBridge, errata BV30 states that clearing the
	 * DMAR_FRCD2_F bit in the fault register causes a spurious
	 * interrupt.  Do nothing.
	 */
	if ((fsts & DMAR_FSTS_PFO) != 0) {
		printf("DMAR%d: Fault Overflow\n", unit->iommu.unit);
		dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO);
	}

	if (enqueue) {
		taskqueue_enqueue(unit->fault_taskqueue,
		    &unit->fault_task);
	}
	return (FILTER_HANDLED);
}

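/*
 * Taskqueue handler: drain the software fault log and print one line
 * per recorded fault, identifying the offending device by its source
 * id (PCI requester id) when a context entry for it is known.
 */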
static void
dmar_fault_task(void *arg, int pending __unused)
{
	struct dmar_unit *unit;
	struct dmar_ctx *ctx;
	uint64_t fault_rec[2];
	int sid, bus, slot, func, faultp;

	unit = arg;
	DMAR_FAULT_LOCK(unit);
	for (;;) {
		faultp = unit->fault_log_tail;
		if (faultp == unit->fault_log_head)
			break;

		fault_rec[0] = unit->fault_log[faultp];
		fault_rec[1] = unit->fault_log[faultp + 1];
		unit->fault_log_tail = dmar_fault_next(unit, faultp);
		DMAR_FAULT_UNLOCK(unit);

		sid = DMAR_FRCD2_SID(fault_rec[1]);
		printf("DMAR%d: ", unit->iommu.unit);
		DMAR_LOCK(unit);
		ctx = dmar_find_ctx_locked(unit, sid);
		if (ctx == NULL) {
			printf("<unknown dev>:");

			/*
			 * Note that the slot and function will not be correct
			 * if ARI is in use, but without a ctx entry we have
			 * no way of knowing whether ARI is in use or not.
			 */
			bus = PCI_RID2BUS(sid);
			slot = PCI_RID2SLOT(sid);
			func = PCI_RID2FUNC(sid);
		} else {
			ctx->context.flags |= IOMMU_CTX_FAULTED;
			ctx->last_fault_rec[0] = fault_rec[0];
			ctx->last_fault_rec[1] = fault_rec[1];
			device_print_prettyname(ctx->context.tag->owner);
			bus = pci_get_bus(ctx->context.tag->owner);
			slot = pci_get_slot(ctx->context.tag->owner);
			func = pci_get_function(ctx->context.tag->owner);
		}
		DMAR_UNLOCK(unit);
		printf(
		    "pci%d:%d:%d sid %x fault acc %x adt 0x%x reason 0x%x "
		    "addr %jx\n",
		    bus, slot, func, sid, DMAR_FRCD2_T(fault_rec[1]),
		    DMAR_FRCD2_AT(fault_rec[1]), DMAR_FRCD2_FR(fault_rec[1]),
		    (uintmax_t)fault_rec[0]);
		DMAR_FAULT_LOCK(unit);
	}
	DMAR_FAULT_UNLOCK(unit);
}

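/*
 * Clear any pending fault records and fault status bits so that the
 * unit starts with a clean fault state.
 */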
static void
dmar_clear_faults(struct dmar_unit *unit)
{
	uint32_t frec, frir, fsts;
	int i;

	for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
		frec = dmar_read4(unit, frir + 12);
		if ((frec & DMAR_FRCD2_F32) == 0)
			continue;
		dmar_write4(unit, frir + 12, DMAR_FRCD2_F32);
	}
	fsts = dmar_read4(unit, DMAR_FSTS_REG);
	dmar_write4(unit, DMAR_FSTS_REG, fsts);
}

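/*
 * Set up fault handling for the unit: allocate the software fault log
 * (size in 64-bit words, tunable via hw.dmar.fault_log_size), create
 * the per-unit taskqueue and its thread, then clear stale faults and
 * unmask the fault interrupt.
 */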
int
dmar_init_fault_log(struct dmar_unit *unit)
{

	mtx_init(&unit->fault_lock, "dmarflt", NULL, MTX_SPIN);
	unit->fault_log_size = 256; /* 128 fault log entries */
	TUNABLE_INT_FETCH("hw.dmar.fault_log_size", &unit->fault_log_size);
	if (unit->fault_log_size % 2 != 0)
		panic("hw.dmar.fault_log_size must be even");
	unit->fault_log = malloc(sizeof(uint64_t) * unit->fault_log_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	TASK_INIT(&unit->fault_task, 0, dmar_fault_task, unit);
	unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->fault_taskqueue);
	taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
	    "dmar%d fault taskq", unit->iommu.unit);

	DMAR_LOCK(unit);
	dmar_disable_fault_intr(unit);
	dmar_clear_faults(unit);
	dmar_enable_fault_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}

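/*
 * Tear down fault handling: mask the fault interrupt, drain and free
 * the taskqueue, and release the software fault log.
 */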
void
dmar_fini_fault_log(struct dmar_unit *unit)
{

	if (unit->fault_taskqueue == NULL)
		return;

	DMAR_LOCK(unit);
	dmar_disable_fault_intr(unit);
	DMAR_UNLOCK(unit);

	taskqueue_drain(unit->fault_taskqueue, &unit->fault_task);
	taskqueue_free(unit->fault_taskqueue);
	unit->fault_taskqueue = NULL;
	mtx_destroy(&unit->fault_lock);

	free(unit->fault_log, M_DEVBUF);
	unit->fault_log = NULL;
	unit->fault_log_head = unit->fault_log_tail = 0;
}

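/* Unmask the fault event interrupt by clearing DMAR_FECTL_IM. */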
void
dmar_enable_fault_intr(struct dmar_unit *unit)
{
	uint32_t fectl;

	DMAR_ASSERT_LOCKED(unit);
	fectl = dmar_read4(unit, DMAR_FECTL_REG);
	fectl &= ~DMAR_FECTL_IM;
	dmar_write4(unit, DMAR_FECTL_REG, fectl);
}

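/* Mask the fault event interrupt by setting DMAR_FECTL_IM. */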
void
dmar_disable_fault_intr(struct dmar_unit *unit)
{
	uint32_t fectl;

	DMAR_ASSERT_LOCKED(unit);
	fectl = dmar_read4(unit, DMAR_FECTL_REG);
	dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM);
}