xref: /freebsd/sys/x86/iommu/intel_fault.c (revision 6137b5f7b8c183ee8806d79b3f1d8e5e3ddb3df3)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <machine/bus.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_dmar.h>

/*
 * Fault interrupt handling for DMARs.  If advanced fault logging is
 * not implemented by the hardware, the code emulates it.  The fast
 * interrupt handler flushes the fault registers into a circular buffer
 * at unit->fault_log and schedules a task.
 *
 * A fast (filter) handler is used because faults usually come in
 * bursts and the number of fault log registers is limited, e.g. down
 * to one for the 5400 MCH.  We try to minimize the latency of clearing
 * the fault register file.  The task is usually long-running, since
 * printf() is slow, but this is not a problem because bursts are rare.
 *
 * For the same reason, each translation unit's task is executed in its
 * own taskqueue thread.
 *
 * XXXKIB It seems no available hardware implements advanced fault
 * logging, so the code to handle AFL is not written.
 */

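/*
 * Advance an index in the software fault log by one record.  Each
 * record occupies two uint64_t slots, so the index moves in steps of
 * two and wraps around at fault_log_size.
 */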
static int
dmar_fault_next(struct dmar_unit *unit, int faultp)
{

	faultp += 2;
	if (faultp == unit->fault_log_size)
		faultp = 0;
	return (faultp);
}

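/*
 * Report and acknowledge error conditions other than primary pending
 * faults: invalidation timeouts, completion and queue errors, and the
 * advanced fault logging bits.  Writing the collected bits back to the
 * fault status register clears them.
 */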
static void
dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts)
{
	uint32_t clear;

	clear = 0;
	if ((fsts & DMAR_FSTS_ITE) != 0) {
		printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit);
		clear |= DMAR_FSTS_ITE;
	}
	if ((fsts & DMAR_FSTS_ICE) != 0) {
		printf("DMAR%d: Invalidation completion error\n",
		    unit->iommu.unit);
		clear |= DMAR_FSTS_ICE;
	}
	if ((fsts & DMAR_FSTS_IQE) != 0) {
		printf("DMAR%d: Invalidation queue error\n",
		    unit->iommu.unit);
		clear |= DMAR_FSTS_IQE;
	}
	if ((fsts & DMAR_FSTS_APF) != 0) {
		printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit);
		clear |= DMAR_FSTS_APF;
	}
	if ((fsts & DMAR_FSTS_AFO) != 0) {
		printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit);
		clear |= DMAR_FSTS_AFO;
	}
	if (clear != 0)
		dmar_write4(unit, DMAR_FSTS_REG, clear);
}

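/*
 * Fast interrupt (filter) handler.  Drain the hardware fault recording
 * registers into the software circular log, acknowledging each record
 * as it is copied, and schedule the slow task to report the captured
 * faults.
 */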
int
dmar_fault_intr(void *arg)
{
	struct dmar_unit *unit;
	uint64_t fault_rec[2];
	uint32_t fsts;
	int fri, frir, faultp;
	bool enqueue;

	unit = arg;
	enqueue = false;
	fsts = dmar_read4(unit, DMAR_FSTS_REG);
	dmar_fault_intr_clear(unit, fsts);

	if ((fsts & DMAR_FSTS_PPF) == 0)
		goto done;

	fri = DMAR_FSTS_FRI(fsts);
	for (;;) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + fri) * 16;
		fault_rec[1] = dmar_read8(unit, frir + 8);
		if ((fault_rec[1] & DMAR_FRCD2_F) == 0)
			break;
		fault_rec[0] = dmar_read8(unit, frir);
		dmar_write4(unit, frir + 12, DMAR_FRCD2_F32);
		DMAR_FAULT_LOCK(unit);
		faultp = unit->fault_log_head;
		if (dmar_fault_next(unit, faultp) == unit->fault_log_tail) {
			/* XXXKIB log overflow */
		} else {
			unit->fault_log[faultp] = fault_rec[0];
			unit->fault_log[faultp + 1] = fault_rec[1];
			unit->fault_log_head = dmar_fault_next(unit, faultp);
			enqueue = true;
		}
		DMAR_FAULT_UNLOCK(unit);
		fri += 1;
		if (fri >= DMAR_CAP_NFR(unit->hw_cap))
			fri = 0;
	}

done:
	/*
	 * On SandyBridge, due to erratum BJ124, IvyBridge erratum
	 * BV100, and Haswell erratum HSD40, "Spurious Intel VT-d
	 * Interrupts May Occur When the PFO Bit is Set".  Handle the
	 * case by clearing the overflow bit even if no fault is
	 * reported.
	 *
	 * On IvyBridge, erratum BV30 states that clearing the
	 * DMAR_FRCD2_F bit in the fault register causes a spurious
	 * interrupt.  Do nothing.
	 */
	if ((fsts & DMAR_FSTS_PFO) != 0) {
		printf("DMAR%d: Fault Overflow\n", unit->iommu.unit);
		dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO);
	}

	if (enqueue) {
		taskqueue_enqueue(unit->fault_taskqueue,
		    &unit->fault_task);
	}
	return (FILTER_HANDLED);
}

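/*
 * Taskqueue handler.  Consume records from the software fault log and
 * print a description of each fault, identifying the offending device
 * by its context if one is known, or by decoding the source-id
 * otherwise.
 */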
static void
dmar_fault_task(void *arg, int pending __unused)
{
	struct dmar_unit *unit;
	struct dmar_ctx *ctx;
	uint64_t fault_rec[2];
	int sid, bus, slot, func, faultp;

	unit = arg;
	DMAR_FAULT_LOCK(unit);
	for (;;) {
		faultp = unit->fault_log_tail;
		if (faultp == unit->fault_log_head)
			break;

		fault_rec[0] = unit->fault_log[faultp];
		fault_rec[1] = unit->fault_log[faultp + 1];
		unit->fault_log_tail = dmar_fault_next(unit, faultp);
		DMAR_FAULT_UNLOCK(unit);

		sid = DMAR_FRCD2_SID(fault_rec[1]);
		printf("DMAR%d: ", unit->iommu.unit);
		DMAR_LOCK(unit);
		ctx = dmar_find_ctx_locked(unit, sid);
		if (ctx == NULL) {
			printf("<unknown dev>:");

			/*
			 * Note that the slot and function will not be correct
			 * if ARI is in use, but without a ctx entry we have
			 * no way of knowing whether ARI is in use or not.
			 */
			bus = PCI_RID2BUS(sid);
			slot = PCI_RID2SLOT(sid);
			func = PCI_RID2FUNC(sid);
		} else {
			ctx->context.flags |= IOMMU_CTX_FAULTED;
			ctx->last_fault_rec[0] = fault_rec[0];
			ctx->last_fault_rec[1] = fault_rec[1];
			device_print_prettyname(ctx->context.tag->owner);
			bus = pci_get_bus(ctx->context.tag->owner);
			slot = pci_get_slot(ctx->context.tag->owner);
			func = pci_get_function(ctx->context.tag->owner);
		}
		DMAR_UNLOCK(unit);
		printf(
		    "pci%d:%d:%d sid %x fault acc %x adt 0x%x reason 0x%x "
		    "addr %jx\n",
		    bus, slot, func, sid, DMAR_FRCD2_T(fault_rec[1]),
		    DMAR_FRCD2_AT(fault_rec[1]), DMAR_FRCD2_FR(fault_rec[1]),
		    (uintmax_t)fault_rec[0]);
		DMAR_FAULT_LOCK(unit);
	}
	DMAR_FAULT_UNLOCK(unit);
}

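/*
 * Acknowledge any faults already recorded by the hardware, clearing
 * each fault recording register's F bit and the fault status register,
 * so that interrupt handling starts from a clean state.
 */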
static void
dmar_clear_faults(struct dmar_unit *unit)
{
	uint32_t frec, frir, fsts;
	int i;

	for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) {
		frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16;
		frec = dmar_read4(unit, frir + 12);
		if ((frec & DMAR_FRCD2_F32) == 0)
			continue;
		dmar_write4(unit, frir + 12, DMAR_FRCD2_F32);
	}
	fsts = dmar_read4(unit, DMAR_FSTS_REG);
	dmar_write4(unit, DMAR_FSTS_REG, fsts);
}

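/*
 * Allocate the software fault log, create the reporting task and its
 * dedicated taskqueue thread, and enable the fault interrupt after
 * clearing any stale hardware fault records.
 */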
int
dmar_init_fault_log(struct dmar_unit *unit)
{

	mtx_init(&unit->fault_lock, "dmarflt", NULL, MTX_SPIN);
	unit->fault_log_size = 256; /* 128 fault log entries */
	TUNABLE_INT_FETCH("hw.dmar.fault_log_size", &unit->fault_log_size);
	if (unit->fault_log_size % 2 != 0)
		panic("hw.dmar.fault_log_size must be even");
	unit->fault_log = malloc(sizeof(uint64_t) * unit->fault_log_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	TASK_INIT(&unit->fault_task, 0, dmar_fault_task, unit);
	unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->fault_taskqueue);
	taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
	    "dmar%d fault taskq", unit->iommu.unit);

	DMAR_LOCK(unit);
	dmar_disable_fault_intr(unit);
	dmar_clear_faults(unit);
	dmar_enable_fault_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}

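/*
 * Tear down fault reporting: disable the interrupt, drain and free the
 * taskqueue, and release the software fault log.
 */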
void
dmar_fini_fault_log(struct dmar_unit *unit)
{

	if (unit->fault_taskqueue == NULL)
		return;

	DMAR_LOCK(unit);
	dmar_disable_fault_intr(unit);
	DMAR_UNLOCK(unit);

	taskqueue_drain(unit->fault_taskqueue, &unit->fault_task);
	taskqueue_free(unit->fault_taskqueue);
	unit->fault_taskqueue = NULL;
	mtx_destroy(&unit->fault_lock);

	free(unit->fault_log, M_DEVBUF);
	unit->fault_log = NULL;
	unit->fault_log_head = unit->fault_log_tail = 0;
}

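/*
 * Unmask the fault event interrupt by clearing the IM bit in the fault
 * event control register.
 */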
void
dmar_enable_fault_intr(struct dmar_unit *unit)
{
	uint32_t fectl;

	DMAR_ASSERT_LOCKED(unit);
	fectl = dmar_read4(unit, DMAR_FECTL_REG);
	fectl &= ~DMAR_FECTL_IM;
	dmar_write4(unit, DMAR_FECTL_REG, fectl);
}

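/*
 * Mask the fault event interrupt by setting the IM bit in the fault
 * event control register.
 */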
void
dmar_disable_fault_intr(struct dmar_unit *unit)
{
	uint32_t fectl;

	DMAR_ASSERT_LOCKED(unit);
	fectl = dmar_read4(unit, DMAR_FECTL_REG);
	dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM);
}