/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

static struct amdiommu_ctx *amdiommu_ir_find(device_t src, uint16_t *rid,
    bool *is_iommu);
static void amdiommu_ir_free_irte(struct amdiommu_ctx *ctx, device_t src,
    u_int cookie);

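/*
 * Allocate 'count' consecutive IRTE indices from the context's vmem
 * arena and return them as MSI cookies.  If the source device has no
 * remapping context, or interrupt remapping is disabled for its unit,
 * every cookie is set to -1 and EOPNOTSUPP is returned, presumably so
 * the caller can fall back to non-remapped MSI programming.
 */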
int
amdiommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
	struct amdiommu_ctx *ctx;
	vmem_addr_t vmem_res;
	u_int idx, i;
	int error;

	ctx = amdiommu_ir_find(src, NULL, NULL);
	if (ctx == NULL || !CTX2AMD(ctx)->irte_enabled) {
		for (i = 0; i < count; i++)
			cookies[i] = -1;
		return (EOPNOTSUPP);
	}

	error = vmem_alloc(ctx->irtids, count, M_FIRSTFIT | M_NOWAIT,
	    &vmem_res);
	if (error != 0) {
		KASSERT(error != EOPNOTSUPP,
		    ("impossible EOPNOTSUPP from vmem"));
		return (error);
	}
	idx = vmem_res;
	for (i = 0; i < count; i++)
		cookies[i] = idx + i;
	return (0);
}

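/*
 * Program the IRTE selected by 'cookie' to deliver 'vector' to 'cpu'
 * and compute the MSI address/data pair for the requesting device.
 * Interrupts generated by the IOMMU itself are not remapped; for that
 * case the plain xAPIC/x2APIC MSI encoding is returned.  For remapped
 * entries the remapen bit is set only after the rest of the entry has
 * been written, separated by a release fence, and the unit's interrupt
 * remapping cache is invalidated for the requester afterwards.
 */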
int
amdiommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
    u_int cookie, uint64_t *addr, uint32_t *data)
{
	struct amdiommu_ctx *ctx;
	struct amdiommu_unit *unit;
	uint16_t rid;
	bool is_iommu;

	ctx = amdiommu_ir_find(src, &rid, &is_iommu);
	if (is_iommu) {
		if (addr != NULL) {
			*data = vector;
			*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
			if (x2apic_mode)
				*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
			else
				KASSERT(cpu <= 0xff,
				    ("cpu id too big %d", cpu));
		}
		return (0);
	}

	if (ctx == NULL)
		return (EOPNOTSUPP);
	unit = CTX2AMD(ctx);
	if (!unit->irte_enabled || cookie == -1)
		return (EOPNOTSUPP);
	if (cookie >= unit->irte_nentries) {
		device_printf(src, "amdiommu%d: cookie %u irte max %u\n",
		    unit->iommu.unit, cookie, unit->irte_nentries);
		return (EINVAL);
	}

	if (unit->irte_x2apic) {
		struct amdiommu_irte_basic_vapic_x2 *irte;

		irte = &ctx->irtx2[cookie];
		irte->supiopf = 0;
		irte->inttype = 0;
		irte->rqeoi = 0;
		irte->dm = 0;
		irte->guestmode = 0;
		irte->dest0 = cpu;
		irte->rsrv0 = 0;
		irte->vector = vector;
		irte->rsrv1 = 0;
		irte->rsrv2 = 0;
		irte->dest1 = cpu >> 24;
		atomic_thread_fence_rel();
		irte->remapen = 1;
	} else {
		struct amdiommu_irte_basic_novapic *irte;

		irte = &ctx->irtb[cookie];
		irte->supiopf = 0;
		irte->inttype = 0;	/* fixed */
		irte->rqeoi = 0;
		irte->dm = 0;		/* phys */
		irte->guestmode = 0;
		irte->dest = cpu;
		irte->vector = vector;
		irte->rsrv = 0;
		atomic_thread_fence_rel();
		irte->remapen = 1;
	}

	if (addr != NULL) {
		*data = cookie;
		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
		if (unit->irte_x2apic)
			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
	}

	iommu_get_requester(src, &rid);
	AMDIOMMU_LOCK(unit);
	amdiommu_qi_invalidate_ir_locked(unit, rid);
	AMDIOMMU_UNLOCK(unit);

	return (0);
}

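/*
 * Tear down the IRTE associated with 'cookie': the entry is cleared
 * and the remapping cache invalidated.  A cookie of -1 marks an
 * interrupt that was never remapped and is ignored.
 */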
int
amdiommu_unmap_msi_intr(device_t src, u_int cookie)
{
	struct amdiommu_ctx *ctx;

	if (cookie == -1)
		return (0);
	ctx = amdiommu_ir_find(src, NULL, NULL);
	amdiommu_ir_free_irte(ctx, src, cookie);
	return (0);
}

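/*
 * I/O APIC interrupt remapping is not implemented yet (see the XXXKIB
 * notes): mapping reports EOPNOTSUPP and unmapping is a no-op.
 */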
int
amdiommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector,
    bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
    uint32_t *lo)
{
	/* XXXKIB for early call from ioapic_create() */
	return (EOPNOTSUPP);
}

int
amdiommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
	/* XXXKIB */
	return (0);
}

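/*
 * Look up the remapping context for an interrupt source.  Returns
 * NULL when the source is the IOMMU itself (reported through
 * *is_iommu), when no unit claims the device, or for the HPET case
 * that is not handled yet.  The requester id is passed back through
 * 'ridp' when requested.
 */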
static struct amdiommu_ctx *
amdiommu_ir_find(device_t src, uint16_t *ridp, bool *is_iommu)
{
	devclass_t src_class;
	struct amdiommu_unit *unit;
	struct amdiommu_ctx *ctx;
	uint32_t edte;
	uint16_t rid;
	uint8_t dte;
	int error;

	/*
	 * We need to determine whether the interrupt source generates
	 * FSB interrupts.  If it does, the source is either the IOMMU
	 * itself, in which case its interrupts are not remapped, or it
	 * is the HPET, whose interrupts are remapped.  For the HPET,
	 * the source id is reported by the HPET record in the IVHD
	 * ACPI table.
	 */
	if (is_iommu != NULL)
		*is_iommu = false;

	ctx = NULL;

	src_class = device_get_devclass(src);
	if (src_class == devclass_find("amdiommu")) {
		if (is_iommu != NULL)
			*is_iommu = true;
	} else if (src_class == devclass_find("hpet")) {
		error = amdiommu_find_unit_for_hpet(src, &unit, &rid, &dte,
		    &edte, bootverbose);
		ctx = NULL; // XXXKIB allocate ctx
	} else {
		error = amdiommu_find_unit(src, &unit, &rid, &dte, &edte,
		    bootverbose);
		if (error == 0) {
			iommu_get_requester(src, &rid);
			ctx = amdiommu_get_ctx_for_dev(unit, src,
			    rid, 0, false /* XXXKIB */, false, dte, edte);
		}
	}
	if (ridp != NULL)
		*ridp = rid;
	return (ctx);
}

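/*
 * Disable and clear one IRTE: remapen is cleared first, a release
 * fence orders that store before the rest of the entry is zeroed, and
 * the unit's interrupt remapping cache is then invalidated for the
 * requester id of the source device.
 */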
static void
amdiommu_ir_free_irte(struct amdiommu_ctx *ctx, device_t src,
    u_int cookie)
{
	struct amdiommu_unit *unit;
	uint16_t rid;

	MPASS(ctx != NULL);
	unit = CTX2AMD(ctx);

	KASSERT(unit->irte_enabled,
	    ("unmap: cookie %d ctx %p unit %p", cookie, ctx, unit));
	KASSERT(cookie < unit->irte_nentries,
	    ("bad cookie %u %u", cookie, unit->irte_nentries));

	if (unit->irte_x2apic) {
		struct amdiommu_irte_basic_vapic_x2 *irte;

		irte = &ctx->irtx2[cookie];
		irte->remapen = 0;
		atomic_thread_fence_rel();
		bzero(irte, sizeof(*irte));
	} else {
		struct amdiommu_irte_basic_novapic *irte;

		irte = &ctx->irtb[cookie];
		irte->remapen = 0;
		atomic_thread_fence_rel();
		bzero(irte, sizeof(*irte));
	}
	iommu_get_requester(src, &rid);
	AMDIOMMU_LOCK(unit);
	amdiommu_qi_invalidate_ir_locked(unit, rid);
	AMDIOMMU_UNLOCK(unit);
}

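/*
 * Allocate the per-context interrupt remapping table: a 128-byte
 * aligned, physically contiguous array of IRTEs in either the x2APIC
 * or the basic format, preferably backed by memory from the unit's
 * NUMA domain, together with a vmem arena that hands out IRTE
 * indices.
 */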
int
amdiommu_ctx_init_irte(struct amdiommu_ctx *ctx)
{
	struct amdiommu_unit *unit;
	void *ptr;
	unsigned long sz;
	int dom;

	unit = CTX2AMD(ctx);
	if (!unit->irte_enabled)
		return (0);

	KASSERT(unit->irte_nentries > 0 &&
	    unit->irte_nentries <= 2048 &&
	    powerof2(unit->irte_nentries),
	    ("amdiommu%d: unit %p irte_nentries %u", unit->iommu.unit,
	    unit, unit->irte_nentries));

	if (bus_get_domain(unit->iommu.dev, &dom) != 0)
		dom = -1;
	sz = unit->irte_nentries;
	sz *= unit->irte_x2apic ? sizeof(struct amdiommu_irte_basic_vapic_x2) :
	    sizeof(struct amdiommu_irte_basic_novapic);

	if (dom != -1) {
		ptr = contigmalloc_domainset(sz, M_DEVBUF, DOMAINSET_PREF(dom),
		    M_WAITOK | M_ZERO, 0, ~0ull, 128, 0);
	} else {
		ptr = contigmalloc(sz, M_DEVBUF, M_WAITOK | M_ZERO,
		    0, ~0ull, 128, 0);
	}
	if (unit->irte_x2apic)
		ctx->irtx2 = ptr;
	else
		ctx->irtb = ptr;
	ctx->irtids = vmem_create("amdirt", 0, unit->irte_nentries, 1, 0,
	    M_FIRSTFIT | M_NOWAIT);

	intr_reprogram(); // XXXKIB

	return (0);
}

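/*
 * Free the context's interrupt remapping table and its index arena.
 */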
void
amdiommu_ctx_fini_irte(struct amdiommu_ctx *ctx)
{
	struct amdiommu_unit *unit;

	unit = CTX2AMD(ctx);
	if (!unit->irte_enabled)
		return;
	if (unit->irte_x2apic)
		free(ctx->irtx2, M_DEVBUF);
	else
		free(ctx->irtb, M_DEVBUF);
	vmem_destroy(ctx->irtids);
}

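/*
 * Per-unit interrupt remapping setup: export the read-only "ir"
 * sysctl, honor the hw.iommu.ir tunable, and size the table from
 * hw.iommu.amd.ir_num, rounded up to a power of two and clamped to
 * the [1, 2048] range.  The IRTE format follows the host APIC mode
 * (x2APIC or not).
 */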
int
amdiommu_init_irt(struct amdiommu_unit *unit)
{
	int enabled, nentries;

	SYSCTL_ADD_INT(&unit->iommu.sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(unit->iommu.dev)),
	    OID_AUTO, "ir", CTLFLAG_RD, &unit->irte_enabled, 0,
	    "Interrupt remapping ops enabled");

	enabled = 1;
	TUNABLE_INT_FETCH("hw.iommu.ir", &enabled);

	unit->irte_enabled = enabled != 0;
	if (!unit->irte_enabled)
		return (0);

	nentries = 32;
	TUNABLE_INT_FETCH("hw.iommu.amd.ir_num", &nentries);
	nentries = roundup_pow_of_two(nentries);
	if (nentries < 1)
		nentries = 1;
	if (nentries > 2048)
		nentries = 2048;
	unit->irte_nentries = nentries;

	unit->irte_x2apic = x2apic_mode;
	return (0);
}

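/*
 * Per-unit teardown counterpart of amdiommu_init_irt(); currently
 * there is nothing to release.
 */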
void
amdiommu_fini_irt(struct amdiommu_unit *unit)
{
}