/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>
#include <x86/iommu/iommu_intrmap.h>

static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
    int *is_dmar);
static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
    uint64_t low, uint16_t rid);
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);

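/*
 * Allocate 'count' consecutive interrupt remapping table entries (IRTEs)
 * for MSI/MSI-X interrupts generated by device 'src'.  The allocated IRTE
 * indices are returned in 'cookies'.  If the source is not covered by a
 * DMAR unit with interrupt remapping enabled, the cookies are set to -1
 * and EOPNOTSUPP is returned.
 */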
int
dmar_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	unit = dmar_ir_find(src, NULL, NULL);
	if (unit == NULL || !unit->ir_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}

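/*
 * Program the IRTE selected by 'cookie' to deliver an MSI interrupt to
 * 'cpu' with 'vector', and compute the remappable-format MSI address and
 * data to be written into the device.  Interrupts generated by the DMAR
 * unit itself are never remapped; for those the compatibility-format
 * address/data pair is returned instead.
 */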
int
dmar_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
    uint64_t *addr, uint32_t *data)
{
	struct dmar_unit *unit;
	uint64_t low;
	uint16_t rid;
	int is_dmar;

	unit = dmar_ir_find(src, &rid, &is_dmar);
	if (is_dmar) {
		KASSERT(unit == NULL, ("DMAR cannot translate itself"));

		/*
		 * See VT-d specification, 5.1.6 Remapping Hardware -
		 * Interrupt Programming.
		 */
		*data = vector;
		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
		if (x2apic_mode)
			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
		else
			KASSERT(cpu <= 0xff, ("cpu id too big %d", cpu));
		return (0);
	}
	if (unit == NULL || !unit->ir_enabled || cookie == -1)
		return (EOPNOTSUPP);

	low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
	    DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
	    DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, cookie, low, rid);

	if (addr != NULL) {
		/*
		 * See VT-d specification, 5.1.5.2 MSI and MSI-X
		 * Register Programming.
		 */
		*addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
		    ((cookie & 0x8000) << 2) | 0x18;
		*data = 0;
	}
	return (0);
}

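/*
 * Release the IRTE previously allocated for an MSI interrupt from 'src'.
 */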
int
dmar_unmap_msi_intr(device_t src, u_int cookie)
{
	struct dmar_unit *unit;

	if (cookie == -1)
		return (0);
	unit = dmar_ir_find(src, NULL, NULL);
	return (dmar_ir_free_irte(unit, cookie));
}

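/*
 * Allocate and program an IRTE for the given I/O APIC pin.  The IRTE index
 * is returned in '*cookie', and the remappable-format redirection table
 * entry words to be written into the I/O APIC are returned in '*hi' and
 * '*lo'.
 */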
int
dmar_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	uint64_t low, iorte;
	u_int idx;
	int error;
	uint16_t rid;

	unit = dmar_find_ioapic(ioapic_id, &rid);
	if (unit == NULL || !unit->ir_enabled) {
		*cookie = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	low = 0;
	switch (irq) {
	case IRQ_EXTINT:
		low |= DMAR_IRTE1_DLM_ExtINT;
		break;
	case IRQ_NMI:
		low |= DMAR_IRTE1_DLM_NMI;
		break;
	case IRQ_SMI:
		low |= DMAR_IRTE1_DLM_SMI;
		break;
	default:
		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
		break;
	}
	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) |
	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, idx, low, rid);

	if (hi != NULL) {
		/*
		 * See VT-d specification, 5.1.5.1 I/OxAPIC
		 * Programming.
		 */
		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
		    (activehi ? IOART_INTAHI : IOART_INTALO) |
		    IOART_DELFIXED | vector;
		*hi = iorte >> 32;
		*lo = iorte;
	}
	*cookie = idx;
	return (0);
}

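/*
 * Release the IRTE programmed for an I/O APIC pin and invalidate the
 * caller's cookie.
 */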
int
dmar_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
	struct dmar_unit *unit;
	u_int idx;

	idx = *cookie;
	if (idx == -1)
		return (0);
	*cookie = -1;
	unit = dmar_find_ioapic(ioapic_id, NULL);
	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", idx, unit));
	return (dmar_ir_free_irte(unit, idx));
}

static struct dmar_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
	devclass_t src_class;
	struct dmar_unit *unit;

	/*
	 * We need to determine if the interrupt source generates FSB
	 * interrupts.  If so, the source is either the DMAR itself, whose
	 * interrupts are not remapped, or an HPET, whose interrupts are
	 * remapped.  For the HPET, the source id is reported by the HPET
	 * record in the DMAR ACPI table.
	 */
	if (is_dmar != NULL)
		*is_dmar = FALSE;
	src_class = device_get_devclass(src);
	if (src_class == devclass_find("dmar")) {
		unit = NULL;
		if (is_dmar != NULL)
			*is_dmar = TRUE;
	} else if (src_class == devclass_find("hpet")) {
		unit = dmar_find_hpet(src, rid);
	} else {
		unit = dmar_find(src, bootverbose);
		if (unit != NULL && rid != NULL)
			iommu_get_requester(src, rid);
	}
	return (unit);
}

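/*
 * Write the IRTE at index 'idx'.  The high word carries the source-id
 * (requester id) validation fields, the low word the delivery mode,
 * vector and destination.  If the entry is already present, only the low
 * word is updated, which handles interrupt rebalancing.  The IEC cache is
 * invalidated after the update.
 */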
static void
dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
    uint16_t rid)
{
	dmar_irte_t *irte;
	uint64_t high;

	KASSERT(idx < unit->irte_cnt,
	    ("bad cookie %d %d", idx, unit->irte_cnt));
	irte = &(unit->irt[idx]);
	high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
	    DMAR_IRTE2_SID_RID(rid);
	if (bootverbose) {
		device_printf(unit->iommu.dev,
		    "programming irte[%d] rid %#x high %#jx low %#jx\n",
		    idx, rid, (uintmax_t)high, (uintmax_t)low);
	}
	DMAR_LOCK(unit);
	if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
		/*
		 * The rte is already valid.  Assume that the request
		 * is to remap the interrupt for balancing.  Only the low
		 * word of the rte needs to be changed.  Assert that the
		 * high word contains the expected value.
		 */
		KASSERT(irte->irte2 == high,
		    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
		    (uintmax_t)high));
		dmar_pte_update(&irte->irte1, low);
	} else {
		dmar_pte_store(&irte->irte2, high);
		dmar_pte_store(&irte->irte1, low);
	}
	dmar_qi_invalidate_iec(unit, idx, 1);
	DMAR_UNLOCK(unit);
}

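/*
 * Clear the IRTE referenced by 'cookie', invalidate the corresponding IEC
 * cache entry and return the index to the irtids arena.
 */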
static int
dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
{
	dmar_irte_t *irte;

	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %d unit %p", cookie, unit));
	KASSERT(cookie < unit->irte_cnt,
	    ("bad cookie %u %u", cookie, unit->irte_cnt));
	irte = &(unit->irt[cookie]);
	dmar_pte_clear(&irte->irte1);
	dmar_pte_clear(&irte->irte2);
	DMAR_LOCK(unit);
	dmar_qi_invalidate_iec(unit, cookie, 1);
	DMAR_UNLOCK(unit);
	vmem_free(unit->irtids, cookie, 1);
	return (0);
}

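/*
 * Set up interrupt remapping for the unit: allocate the interrupt
 * remapping table and the arena of table indices, load the table pointer
 * into the hardware, reprogram already-configured interrupt pins and
 * finally enable remapping.  Remapping is skipped when the hardware lacks
 * the capability, when it is disabled by the hw.dmar.ir or hw.iommu.ir
 * tunables, or when queued invalidation (QI) is not available.
 */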
int
dmar_init_irt(struct dmar_unit *unit)
{
	SYSCTL_ADD_INT(&unit->iommu.sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(unit->iommu.dev)),
	    OID_AUTO, "ir", CTLFLAG_RD, &unit->ir_enabled, 0,
	    "Interrupt remapping ops enabled");
	if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
		return (0);
	unit->ir_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
	TUNABLE_INT_FETCH("hw.iommu.ir", &unit->ir_enabled);
	if (!unit->ir_enabled)
		return (0);
	if (!unit->qi_enabled) {
		unit->ir_enabled = 0;
		if (bootverbose)
			device_printf(unit->iommu.dev,
	     "QI disabled, disabling interrupt remapping\n");
		return (0);
	}
	unit->irte_cnt = roundup_pow_of_two(num_io_irqs);
	if (unit->memdomain == -1) {
		unit->irt = kmem_alloc_contig(
		    unit->irte_cnt * sizeof(dmar_irte_t),
		    M_ZERO | M_WAITOK, 0, iommu_high, PAGE_SIZE, 0,
		    DMAR_IS_COHERENT(unit) ?
		    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
	} else {
		unit->irt = kmem_alloc_contig_domainset(
		    DOMAINSET_PREF(unit->memdomain),
		    unit->irte_cnt * sizeof(dmar_irte_t),
		    M_ZERO | M_WAITOK, 0, iommu_high, PAGE_SIZE, 0,
		    DMAR_IS_COHERENT(unit) ?
		    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
	}
	if (unit->irt == NULL)
		return (ENOMEM);
	unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
	unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
	    M_FIRSTFIT | M_NOWAIT);
	DMAR_LOCK(unit);
	dmar_load_irt_ptr(unit);
	dmar_qi_invalidate_iec_glob(unit);
	DMAR_UNLOCK(unit);

	/*
	 * Initialize mappings for already configured interrupt pins.
	 * Required, because otherwise the interrupts fault without
	 * irtes.
	 */
	intr_reprogram();

	DMAR_LOCK(unit);
	dmar_enable_ir(unit);
	DMAR_UNLOCK(unit);
	return (0);
}

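/*
 * Tear down interrupt remapping: disable it in the hardware and free the
 * remapping table and the index arena.
 */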
void
dmar_fini_irt(struct dmar_unit *unit)
{

	unit->ir_enabled = 0;
	if (unit->irt != NULL) {
		dmar_disable_ir(unit);
		dmar_qi_invalidate_iec_glob(unit);
		vmem_destroy(unit->irtids);
		kmem_free(unit->irt, unit->irte_cnt * sizeof(dmar_irte_t));
	}
}