xref: /freebsd/sys/x86/iommu/intel_intrmap.c (revision 5c2bc3db201a4fe8d7911cf816bea104d5dc2138)
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>
#include <x86/iommu/iommu_intrmap.h>

static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
    int *is_dmar);
static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
    uint64_t low, uint16_t rid);
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);

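/*
 * Allocate 'count' consecutive interrupt remapping table entries for
 * MSI/MSI-X interrupts raised by the source device and return their
 * indexes in 'cookies'.  If the device is not covered by an
 * interrupt-remapping capable DMAR unit, the cookies are set to -1 and
 * EOPNOTSUPP is returned so that the caller falls back to non-remapped
 * delivery.
 */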
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	unit = dmar_ir_find(src, NULL, NULL);
	if (unit == NULL || !unit->ir_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}

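/*
 * Program the remapping table entry previously allocated for 'cookie'
 * to deliver 'vector' to 'cpu', and compose the MSI address/data pair
 * the device must use in the remappable format.  Interrupts raised by
 * the DMAR unit itself are never remapped and are programmed in the
 * compatibility format instead.
 */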
int
iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
    uint64_t *addr, uint32_t *data)
{
	struct dmar_unit *unit;
	uint64_t low;
	uint16_t rid;
	int is_dmar;

	unit = dmar_ir_find(src, &rid, &is_dmar);
	if (is_dmar) {
		KASSERT(unit == NULL, ("DMAR cannot translate itself"));

		/*
		 * See VT-d specification, 5.1.6 Remapping Hardware -
		 * Interrupt Programming.
		 */
		*data = vector;
		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
		if (x2apic_mode)
			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
		else
			KASSERT(cpu <= 0xff, ("cpu id too big %u", cpu));
		return (0);
	}
	if (unit == NULL || !unit->ir_enabled || cookie == -1)
		return (EOPNOTSUPP);

	low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
	    DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
	    DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, cookie, low, rid);

	if (addr != NULL) {
		/*
		 * See VT-d specification, 5.1.5.2 MSI and MSI-X
		 * Register Programming.
		 */
		*addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
		    ((cookie & 0x8000) << 2) | 0x18;
		*data = 0;
	}
	return (0);
}

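/*
 * Release the remapping table entry identified by 'cookie' for an MSI
 * interrupt that is being torn down.  A cookie of -1 means remapping
 * was never set up for this interrupt and there is nothing to undo.
 */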
int
iommu_unmap_msi_intr(device_t src, u_int cookie)
{
	struct dmar_unit *unit;

	if (cookie == -1)
		return (0);
	unit = dmar_ir_find(src, NULL, NULL);
	return (dmar_ir_free_irte(unit, cookie));
}

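/*
 * Allocate and program a remapping table entry for an I/O APIC pin and
 * return the redirection entry words ('hi'/'lo') in the remappable
 * format, with the IRT index encoded as the interrupt handle.  ExtINT,
 * NMI and SMI pins keep their special delivery modes; everything else
 * is delivered as a fixed vector.
 */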
int
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
	struct dmar_unit *unit;
	vmem_addr_t vmem_res;
	uint64_t low, iorte;
	u_int idx;
	int error;
	uint16_t rid;

	unit = dmar_find_ioapic(ioapic_id, &rid);
	if (unit == NULL || !unit->ir_enabled) {
		*cookie = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	low = 0;
	switch (irq) {
	case IRQ_EXTINT:
		low |= DMAR_IRTE1_DLM_ExtINT;
		break;
	case IRQ_NMI:
		low |= DMAR_IRTE1_DLM_NMI;
		break;
	case IRQ_SMI:
		low |= DMAR_IRTE1_DLM_SMI;
		break;
	default:
		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
		break;
	}
	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
	    DMAR_IRTE1_DST_xAPIC(cpu)) |
	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
	dmar_ir_program_irte(unit, idx, low, rid);

	if (hi != NULL) {
		/*
		 * See VT-d specification, 5.1.5.1 I/OxAPIC
		 * Programming.
		 */
		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
		    (activehi ? IOART_INTAHI : IOART_INTALO) |
		    IOART_DELFIXED | vector;
		*hi = iorte >> 32;
		*lo = iorte;
	}
	*cookie = idx;
	return (0);
}

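/*
 * Tear down the remapping table entry for an I/O APIC pin and release
 * its index.  The cookie is cleared so that a repeated unmap becomes a
 * no-op.
 */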
int
iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
	struct dmar_unit *unit;
	u_int idx;

	idx = *cookie;
	if (idx == -1)
		return (0);
	*cookie = -1;
	unit = dmar_find_ioapic(ioapic_id, NULL);
	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %u unit %p", idx, unit));
	return (dmar_ir_free_irte(unit, idx));
}

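/*
 * Find the DMAR unit that remaps interrupts from the source device
 * 'src' and, if requested, report the source id to program into the
 * remapping table entry and whether the source is the DMAR unit
 * itself.
 */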
static struct dmar_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
	devclass_t src_class;
	struct dmar_unit *unit;

	/*
	 * We need to determine if the interrupt source generates FSB
	 * interrupts.  If it does, the source is either the DMAR unit
	 * itself, whose interrupts are never remapped, or an HPET,
	 * whose interrupts are remapped.  For HPET, the source id is
	 * reported by the HPET record in the DMAR ACPI table.
	 */
	if (is_dmar != NULL)
		*is_dmar = FALSE;
	src_class = device_get_devclass(src);
	if (src_class == devclass_find("dmar")) {
		unit = NULL;
		if (is_dmar != NULL)
			*is_dmar = TRUE;
	} else if (src_class == devclass_find("hpet")) {
		unit = dmar_find_hpet(src, rid);
	} else {
		unit = dmar_find(src, bootverbose);
		if (unit != NULL && rid != NULL)
			iommu_get_requester(src, rid);
	}
	return (unit);
}

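/*
 * Write the interrupt remapping table entry at 'idx': the high word
 * carries the source-id validation fields for 'rid', the low word the
 * delivery information.  If the entry is already present, only the low
 * word is updated, which is the rebalancing case.  The IEC invalidation
 * makes the hardware pick up the new entry.
 */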
static void
dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
    uint16_t rid)
{
	dmar_irte_t *irte;
	uint64_t high;

	KASSERT(idx < unit->irte_cnt,
	    ("bad cookie %u %u", idx, unit->irte_cnt));
	irte = &(unit->irt[idx]);
	high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
	    DMAR_IRTE2_SID_RID(rid);
	if (bootverbose) {
		device_printf(unit->iommu.dev,
		    "programming irte[%u] rid %#x high %#jx low %#jx\n",
		    idx, rid, (uintmax_t)high, (uintmax_t)low);
	}
	DMAR_LOCK(unit);
	if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
		/*
		 * The rte is already valid.  Assume that the request
		 * is to remap the interrupt for balancing.  Only the
		 * low word of the rte needs to change; assert that the
		 * high word contains the expected value.
		 */
		KASSERT(irte->irte2 == high,
		    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
		    (uintmax_t)high));
		dmar_pte_update(&irte->irte1, low);
	} else {
		dmar_pte_store(&irte->irte2, high);
		dmar_pte_store(&irte->irte1, low);
	}
	dmar_qi_invalidate_iec(unit, idx, 1);
	DMAR_UNLOCK(unit);
}

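/*
 * Clear the remapping table entry for 'cookie', invalidate the
 * interrupt entry cache, and return the index to the allocator.
 */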
static int
dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
{
	dmar_irte_t *irte;

	KASSERT(unit != NULL && unit->ir_enabled,
	    ("unmap: cookie %u unit %p", cookie, unit));
	KASSERT(cookie < unit->irte_cnt,
	    ("bad cookie %u %u", cookie, unit->irte_cnt));
	irte = &(unit->irt[cookie]);
	dmar_pte_clear(&irte->irte1);
	dmar_pte_clear(&irte->irte2);
	DMAR_LOCK(unit);
	dmar_qi_invalidate_iec(unit, cookie, 1);
	DMAR_UNLOCK(unit);
	vmem_free(unit->irtids, cookie, 1);
	return (0);
}

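/*
 * Set up interrupt remapping for a DMAR unit: allocate the interrupt
 * remapping table and the vmem arena that hands out its indexes, point
 * the hardware at the table, and enable remapping.  Remapping is only
 * used if the unit advertises the capability, the "hw.dmar.ir" tunable
 * allows it, and queued invalidation is available.
 */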
int
dmar_init_irt(struct dmar_unit *unit)
{

	if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
		return (0);
	unit->ir_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
	if (!unit->ir_enabled)
		return (0);
	if (!unit->qi_enabled) {
		unit->ir_enabled = 0;
		if (bootverbose)
			device_printf(unit->iommu.dev,
			    "QI disabled, disabling interrupt remapping\n");
		return (0);
	}
	unit->irte_cnt = roundup_pow_of_two(num_io_irqs);
	unit->irt = kmem_alloc_contig(unit->irte_cnt * sizeof(dmar_irte_t),
	    M_ZERO | M_WAITOK, 0, iommu_high, PAGE_SIZE, 0,
	    DMAR_IS_COHERENT(unit) ?
	    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
	if (unit->irt == NULL)
		return (ENOMEM);
	unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
	unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
	    M_FIRSTFIT | M_NOWAIT);
	DMAR_LOCK(unit);
	dmar_load_irt_ptr(unit);
	dmar_qi_invalidate_iec_glob(unit);
	DMAR_UNLOCK(unit);

	/*
	 * Initialize mappings for the already configured interrupt
	 * pins.  This is required because otherwise those interrupts
	 * would fault, having no IRTEs.
	 */
	intr_reprogram();

	DMAR_LOCK(unit);
	dmar_enable_ir(unit);
	DMAR_UNLOCK(unit);
	return (0);
}

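/*
 * Disable interrupt remapping on the unit and free the remapping table
 * and its index allocator.
 */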
void
dmar_fini_irt(struct dmar_unit *unit)
{

	unit->ir_enabled = 0;
	if (unit->irt != NULL) {
		dmar_disable_ir(unit);
		dmar_qi_invalidate_iec_glob(unit);
		vmem_destroy(unit->irtids);
		kmem_free(unit->irt, unit->irte_cnt * sizeof(dmar_irte_t));
	}
}
375