/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>

#include <machine/vmparam.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "io/iommu.h"

/*
 * Documented in the "Intel Virtualization Technology for Directed I/O",
 * Architecture Spec, September 2008.
 */

#define VTD_DRHD_INCLUDE_PCI_ALL(Flags) (((Flags) >> 0) & 0x1)

/* Section 10.4 "Register Descriptions" */
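/*
 * Only the registers up to and including the Context Command register
 * (offset 0x28) are described here; the IOTLB invalidation registers are
 * located at runtime via the IRO field of the extended capability register.
 */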
struct vtdmap {
	volatile uint32_t version;
	volatile uint32_t res0;
	volatile uint64_t cap;
	volatile uint64_t ext_cap;
	volatile uint32_t gcr;
	volatile uint32_t gsr;
	volatile uint64_t rta;
	volatile uint64_t ccr;
};

#define VTD_CAP_SAGAW(cap) (((cap) >> 8) & 0x1F)
#define VTD_CAP_ND(cap) ((cap) & 0x7)
#define VTD_CAP_CM(cap) (((cap) >> 7) & 0x1)
#define VTD_CAP_SPS(cap) (((cap) >> 34) & 0xF)
#define VTD_CAP_RWBF(cap) (((cap) >> 4) & 0x1)

#define VTD_ECAP_DI(ecap) (((ecap) >> 2) & 0x1)
#define VTD_ECAP_COHERENCY(ecap) ((ecap) & 0x1)
#define VTD_ECAP_IRO(ecap) (((ecap) >> 8) & 0x3FF)

#define VTD_GCR_WBF (1 << 27)
#define VTD_GCR_SRTP (1 << 30)
#define VTD_GCR_TE (1U << 31)

#define VTD_GSR_WBFS (1 << 27)
#define VTD_GSR_RTPS (1 << 30)
#define VTD_GSR_TES (1U << 31)

#define VTD_CCR_ICC (1UL << 63)	/* invalidate context cache */
#define VTD_CCR_CIRG_GLOBAL (1UL << 61)	/* global invalidation */

#define VTD_IIR_IVT (1UL << 63)	/* invalidation IOTLB */
#define VTD_IIR_IIRG_GLOBAL (1ULL << 60) /* global IOTLB invalidation */
#define VTD_IIR_IIRG_DOMAIN (2ULL << 60) /* domain IOTLB invalidation */
#define VTD_IIR_IIRG_PAGE (3ULL << 60)	/* page IOTLB invalidation */
#define VTD_IIR_DRAIN_READS (1ULL << 49) /* drain pending DMA reads */
#define VTD_IIR_DRAIN_WRITES (1ULL << 48) /* drain pending DMA writes */
#define VTD_IIR_DOMAIN_P 32

#define VTD_ROOT_PRESENT 0x1
#define VTD_CTX_PRESENT 0x1
#define VTD_CTX_TT_ALL (1UL << 2)

#define VTD_PTE_RD (1UL << 0)
#define VTD_PTE_WR (1UL << 1)
#define VTD_PTE_SUPERPAGE (1UL << 7)
#define VTD_PTE_ADDR_M (0x000FFFFFFFFFF000UL)

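/*
 * A context table holds 256 entries, one per device:function, and each
 * entry is 128 bits wide. The tables are stored as arrays of uint64_t,
 * so the low byte of the PCI RID is doubled to index the array.
 */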
#define VTD_RID2IDX(rid) (((rid) & 0xff) * 2)

struct domain {
	uint64_t *ptp;		/* first level page table page */
	int pt_levels;		/* number of page table levels */
	int addrwidth;		/* 'AW' field in context entry */
	int spsmask;		/* supported super page sizes */
	u_int id;		/* domain id */
	vm_paddr_t maxaddr;	/* highest address to be mapped */
	SLIST_ENTRY(domain) next;
};

static SLIST_HEAD(, domain) domhead;

#define DRHD_MAX_UNITS 16
static ACPI_DMAR_HARDWARE_UNIT *drhds[DRHD_MAX_UNITS];
static int drhd_num;
static struct vtdmap *vtdmaps[DRHD_MAX_UNITS];
static int max_domains;
typedef int (*drhd_ident_func_t)(void);

static uint64_t root_table[PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
static uint64_t ctx_tables[256][PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);

static MALLOC_DEFINE(M_VTD, "vtd", "vtd");

static int
vtd_max_domains(struct vtdmap *vtdmap)
{
	int nd;

	nd = VTD_CAP_ND(vtdmap->cap);

	switch (nd) {
	case 0:
		return (16);
	case 1:
		return (64);
	case 2:
		return (256);
	case 3:
		return (1024);
	case 4:
		return (4 * 1024);
	case 5:
		return (16 * 1024);
	case 6:
		return (64 * 1024);
	default:
		panic("vtd_max_domains: invalid value of nd (0x%0x)", nd);
	}
}

static u_int
domain_id(void)
{
	u_int id;
	struct domain *dom;

	/* Skip domain id 0 - it is reserved when Caching Mode field is set */
	for (id = 1; id < max_domains; id++) {
		SLIST_FOREACH(dom, &domhead, next) {
			if (dom->id == id)
				break;
		}
		if (dom == NULL)
			break;		/* found it */
	}

	if (id >= max_domains)
		panic("domain ids exhausted");

	return (id);
}

static struct vtdmap *
vtd_device_scope(uint16_t rid)
{
	int i, remaining, pathremaining;
	char *end, *pathend;
	struct vtdmap *vtdmap;
	ACPI_DMAR_HARDWARE_UNIT *drhd;
	ACPI_DMAR_DEVICE_SCOPE *device_scope;
	ACPI_DMAR_PCI_PATH *path;

	for (i = 0; i < drhd_num; i++) {
		drhd = drhds[i];

		if (VTD_DRHD_INCLUDE_PCI_ALL(drhd->Flags)) {
			/*
			 * From Intel VT-d arch spec, version 3.0:
			 * If a DRHD structure with INCLUDE_PCI_ALL flag Set is
			 * reported for a Segment, it must be enumerated by
			 * BIOS after all other DRHD structures for the same
			 * Segment.
			 */
			vtdmap = vtdmaps[i];
			return (vtdmap);
		}

		end = (char *)drhd + drhd->Header.Length;
		remaining = drhd->Header.Length - sizeof(ACPI_DMAR_HARDWARE_UNIT);
		while (remaining > sizeof(ACPI_DMAR_DEVICE_SCOPE)) {
			device_scope = (ACPI_DMAR_DEVICE_SCOPE *)(end - remaining);
			remaining -= device_scope->Length;

			switch (device_scope->EntryType) {
			/* 0x01 and 0x02 are PCI device entries */
			case 0x01:
			case 0x02:
				break;
			default:
				continue;
			}

			if (PCI_RID2BUS(rid) != device_scope->Bus)
				continue;

			pathend = (char *)device_scope + device_scope->Length;
			pathremaining = device_scope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE);
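			/*
			 * Walk the PCI path entries of this device scope and
			 * match the slot/function encoded in the RID.
			 */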
			while (pathremaining >= sizeof(ACPI_DMAR_PCI_PATH)) {
				path = (ACPI_DMAR_PCI_PATH *)(pathend - pathremaining);
				pathremaining -= sizeof(ACPI_DMAR_PCI_PATH);

				if (PCI_RID2SLOT(rid) != path->Device)
					continue;
				if (PCI_RID2FUNC(rid) != path->Function)
					continue;

				vtdmap = vtdmaps[i];
				return (vtdmap);
			}
		}
	}

	/* No matching scope */
	return (NULL);
}

static void
vtd_wbflush(struct vtdmap *vtdmap)
{

	if (VTD_ECAP_COHERENCY(vtdmap->ext_cap) == 0)
		pmap_invalidate_cache();

	if (VTD_CAP_RWBF(vtdmap->cap)) {
		vtdmap->gcr = VTD_GCR_WBF;
		while ((vtdmap->gsr & VTD_GSR_WBFS) != 0)
			;
	}
}

static void
vtd_ctx_global_invalidate(struct vtdmap *vtdmap)
{

	vtdmap->ccr = VTD_CCR_ICC | VTD_CCR_CIRG_GLOBAL;
	while ((vtdmap->ccr & VTD_CCR_ICC) != 0)
		;
}

static void
vtd_iotlb_global_invalidate(struct vtdmap *vtdmap)
{
	int offset;
	volatile uint64_t *iotlb_reg, val;

	vtd_wbflush(vtdmap);

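	/*
	 * The IRO field of the extended capability register locates the
	 * IOTLB registers in units of 16 bytes from the register base; the
	 * 64-bit IOTLB Invalidate register sits 8 bytes into that block,
	 * after the Invalidate Address register.
	 */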
	offset = VTD_ECAP_IRO(vtdmap->ext_cap) * 16;
	iotlb_reg = (volatile uint64_t *)((caddr_t)vtdmap + offset + 8);

	*iotlb_reg = VTD_IIR_IVT | VTD_IIR_IIRG_GLOBAL |
	    VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES;

	while (1) {
		val = *iotlb_reg;
		if ((val & VTD_IIR_IVT) == 0)
			break;
	}
}

static void
vtd_translation_enable(struct vtdmap *vtdmap)
{

	vtdmap->gcr = VTD_GCR_TE;
	while ((vtdmap->gsr & VTD_GSR_TES) == 0)
		;
}

static void
vtd_translation_disable(struct vtdmap *vtdmap)
{

	vtdmap->gcr = 0;
	while ((vtdmap->gsr & VTD_GSR_TES) != 0)
		;
}

static int
vtd_init(void)
{
	int i, units, remaining, tmp;
	struct vtdmap *vtdmap;
	vm_paddr_t ctx_paddr;
	char *end, envname[32];
	unsigned long mapaddr;
	ACPI_STATUS status;
	ACPI_TABLE_DMAR *dmar;
	ACPI_DMAR_HEADER *hdr;
	ACPI_DMAR_HARDWARE_UNIT *drhd;

	/*
	 * Allow the user to override the ACPI DMAR table by specifying the
	 * physical address of each remapping unit.
	 *
	 * The following example specifies two remapping units at
	 * physical addresses 0xfed90000 and 0xfeda0000 respectively.
	 * set vtd.regmap.0.addr=0xfed90000
	 * set vtd.regmap.1.addr=0xfeda0000
	 */
	for (units = 0; units < DRHD_MAX_UNITS; units++) {
		snprintf(envname, sizeof(envname), "vtd.regmap.%d.addr", units);
		if (getenv_ulong(envname, &mapaddr) == 0)
			break;
		vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(mapaddr);
	}

	if (units > 0)
		goto skip_dmar;

	/* Search for DMAR table. */
	status = AcpiGetTable(ACPI_SIG_DMAR, 0, (ACPI_TABLE_HEADER **)&dmar);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	end = (char *)dmar + dmar->Header.Length;
	remaining = dmar->Header.Length - sizeof(ACPI_TABLE_DMAR);
	while (remaining > sizeof(ACPI_DMAR_HEADER)) {
		hdr = (ACPI_DMAR_HEADER *)(end - remaining);
		if (hdr->Length > remaining)
			break;
		/*
		 * From Intel VT-d arch spec, version 1.3:
		 * BIOS implementations must report mapping structures
		 * in numerical order, i.e. all remapping structures of
		 * type 0 (DRHD) enumerated before remapping structures of
		 * type 1 (RMRR) and so forth.
		 */
		if (hdr->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
			break;

		drhd = (ACPI_DMAR_HARDWARE_UNIT *)hdr;
		drhds[units] = drhd;
		vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(drhd->Address);
		if (++units >= DRHD_MAX_UNITS)
			break;
		remaining -= hdr->Length;
	}

	if (units <= 0)
		return (ENXIO);

skip_dmar:
	drhd_num = units;

	max_domains = 64 * 1024;	/* maximum valid value */
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];

		if (VTD_CAP_CM(vtdmap->cap) != 0)
			panic("vtd_init: invalid caching mode");

		/* take most compatible (minimum) value */
		if ((tmp = vtd_max_domains(vtdmap)) < max_domains)
			max_domains = tmp;
	}

	/*
	 * Set up the root-table to point to the context-entry tables
	 */
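	/*
	 * Each 128-bit root entry occupies two consecutive uint64_t slots;
	 * the low quadword holds the context-table pointer and the present
	 * bit, so only the even indices are written here.
	 */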
	for (i = 0; i < 256; i++) {
		ctx_paddr = vtophys(ctx_tables[i]);
		if (ctx_paddr & PAGE_MASK)
			panic("ctx table (0x%0lx) not page aligned", ctx_paddr);

		root_table[i * 2] = ctx_paddr | VTD_ROOT_PRESENT;
	}

	return (0);
}

static void
vtd_cleanup(void)
{
}

static void
vtd_enable(void)
{
	int i;
	struct vtdmap *vtdmap;

	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_wbflush(vtdmap);

		/* Update the root table address */
		vtdmap->rta = vtophys(root_table);
		vtdmap->gcr = VTD_GCR_SRTP;
		while ((vtdmap->gsr & VTD_GSR_RTPS) == 0)
			;

		vtd_ctx_global_invalidate(vtdmap);
		vtd_iotlb_global_invalidate(vtdmap);

		vtd_translation_enable(vtdmap);
	}
}

static void
vtd_disable(void)
{
	int i;
	struct vtdmap *vtdmap;

	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_translation_disable(vtdmap);
	}
}

static int
vtd_add_device(void *arg, device_t dev __unused, uint16_t rid)
{
	int idx;
	uint64_t *ctxp;
	struct domain *dom = arg;
	vm_paddr_t pt_paddr;
	struct vtdmap *vtdmap;
	uint8_t bus;

	KASSERT(dom != NULL, ("domain is NULL"));

	bus = PCI_RID2BUS(rid);
	ctxp = ctx_tables[bus];
	pt_paddr = vtophys(dom->ptp);
	idx = VTD_RID2IDX(rid);

	if (ctxp[idx] & VTD_CTX_PRESENT) {
		panic("vtd_add_device: device %x is already owned by "
		    "domain %d", rid,
		    (uint16_t)(ctxp[idx + 1] >> 8));
	}

	if ((vtdmap = vtd_device_scope(rid)) == NULL)
		panic("vtd_add_device: device %x is not in scope for "
		    "any DMA remapping unit", rid);

	/*
	 * Order is important. The 'present' bit is set only after all fields
	 * of the context pointer are initialized.
	 */
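	/*
	 * The high quadword of the 128-bit context entry carries the address
	 * width and domain id; the low quadword carries the translation
	 * type, the page-table pointer and the present bit.
	 */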
	ctxp[idx + 1] = dom->addrwidth | (dom->id << 8);

	if (VTD_ECAP_DI(vtdmap->ext_cap))
		ctxp[idx] = VTD_CTX_TT_ALL;
	else
		ctxp[idx] = 0;

	ctxp[idx] |= pt_paddr | VTD_CTX_PRESENT;

	/*
	 * 'Not Present' entries are not cached in either the Context Cache
	 * or in the IOTLB, so there is no need to invalidate either of them.
	 */
	return (0);
}

static int
vtd_remove_device(void *arg, device_t dev __unused, uint16_t rid)
{
	int i, idx;
	uint64_t *ctxp;
	struct vtdmap *vtdmap;
	uint8_t bus;

	bus = PCI_RID2BUS(rid);
	ctxp = ctx_tables[bus];
	idx = VTD_RID2IDX(rid);

	/*
	 * Order is important. The 'present' bit must be cleared first.
	 */
	ctxp[idx] = 0;
	ctxp[idx + 1] = 0;

	/*
	 * Invalidate the Context Cache and the IOTLB.
	 *
	 * XXX use device-selective invalidation for Context Cache
	 * XXX use domain-selective invalidation for IOTLB
	 */
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_ctx_global_invalidate(vtdmap);
		vtd_iotlb_global_invalidate(vtdmap);
	}
	return (0);
}

#define CREATE_MAPPING 0
#define REMOVE_MAPPING 1

static uint64_t
vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
    int remove)
{
	struct domain *dom;
	int i, spshift, ptpshift, ptpindex, nlevels;
	uint64_t spsize, *ptp;

	dom = arg;
	ptpindex = 0;
	ptpshift = 0;

	KASSERT(gpa + len > gpa, ("%s: invalid gpa range %#lx/%#lx", __func__,
	    gpa, len));
	KASSERT(gpa + len <= dom->maxaddr, ("%s: gpa range %#lx/%#lx beyond "
	    "domain maxaddr %#lx", __func__, gpa, len, dom->maxaddr));

	if (gpa & PAGE_MASK)
		panic("vtd_create_mapping: unaligned gpa 0x%0lx", gpa);

	if (hpa & PAGE_MASK)
		panic("vtd_create_mapping: unaligned hpa 0x%0lx", hpa);

	if (len & PAGE_MASK)
		panic("vtd_create_mapping: unaligned len 0x%0lx", len);

	/*
	 * Compute the size of the mapping that we can accommodate.
	 *
	 * This is based on three factors:
	 * - supported super page size
	 * - alignment of the region starting at 'gpa' and 'hpa'
	 * - length of the region 'len'
	 */
	spshift = 48;
	for (i = 3; i >= 0; i--) {
		spsize = 1UL << spshift;
		if ((dom->spsmask & (1 << i)) != 0 &&
		    (gpa & (spsize - 1)) == 0 &&
		    (hpa & (spsize - 1)) == 0 &&
		    (len >= spsize)) {
			break;
		}
		spshift -= 9;
	}

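	/*
	 * Walk down the page-table hierarchy, consuming 9 bits of 'gpa' per
	 * level and allocating intermediate page-table pages as needed,
	 * until we reach the level at which the (super)page mapping will be
	 * installed.
	 */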
	ptp = dom->ptp;
	nlevels = dom->pt_levels;
	while (--nlevels >= 0) {
		ptpshift = 12 + nlevels * 9;
		ptpindex = (gpa >> ptpshift) & 0x1FF;

		/* We have reached the leaf mapping */
		if (spshift >= ptpshift) {
			break;
		}

		/*
		 * We are working on a non-leaf page table page.
		 *
		 * Create a downstream page table page if necessary and point
		 * to it from the current page table.
		 */
		if (ptp[ptpindex] == 0) {
			void *nlp = malloc(PAGE_SIZE, M_VTD, M_WAITOK | M_ZERO);
			ptp[ptpindex] = vtophys(nlp) | VTD_PTE_RD | VTD_PTE_WR;
		}

		ptp = (uint64_t *)PHYS_TO_DMAP(ptp[ptpindex] & VTD_PTE_ADDR_M);
	}

	if ((gpa & ((1UL << ptpshift) - 1)) != 0)
		panic("gpa 0x%lx and ptpshift %d mismatch", gpa, ptpshift);

	/*
	 * Update the 'gpa' -> 'hpa' mapping
	 */
	if (remove) {
		ptp[ptpindex] = 0;
	} else {
		ptp[ptpindex] = hpa | VTD_PTE_RD | VTD_PTE_WR;

		if (nlevels > 0)
			ptp[ptpindex] |= VTD_PTE_SUPERPAGE;
	}

	return (1UL << ptpshift);
}

static int
vtd_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
    uint64_t *res_len)
{

	*res_len = vtd_update_mapping(arg, gpa, hpa, len, CREATE_MAPPING);
	return (0);
}

static int
vtd_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len, uint64_t *res_len)
{

	*res_len = vtd_update_mapping(arg, gpa, 0, len, REMOVE_MAPPING);
	return (0);
}

static int
vtd_invalidate_tlb(void *dom)
{
	int i;
	struct vtdmap *vtdmap;

	/*
	 * Invalidate the IOTLB.
	 * XXX use domain-selective invalidation for IOTLB
	 */
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_iotlb_global_invalidate(vtdmap);
	}
	return (0);
}

static void *
vtd_create_domain(vm_paddr_t maxaddr)
{
	struct domain *dom;
	vm_paddr_t addr;
	int tmp, i, gaw, agaw, sagaw, res, pt_levels, addrwidth;
	struct vtdmap *vtdmap;

	if (drhd_num <= 0)
		panic("vtd_create_domain: no dma remapping hardware available");

	/*
	 * Calculate AGAW.
	 * Section 3.4.2 "Adjusted Guest Address Width", Architecture Spec.
	 */
	addr = 0;
	for (gaw = 0; addr < maxaddr; gaw++)
		addr = 1ULL << gaw;

	res = (gaw - 12) % 9;
	if (res == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - res;

	if (agaw > 64)
		agaw = 64;

	/*
	 * Select the smallest Supported AGAW and the corresponding number
	 * of page table levels.
	 */
	pt_levels = 2;
	sagaw = 30;
	addrwidth = 0;

	tmp = ~0;
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		/* take most compatible value */
		tmp &= VTD_CAP_SAGAW(vtdmap->cap);
	}

	for (i = 0; i < 5; i++) {
		if ((tmp & (1 << i)) != 0 && sagaw >= agaw)
			break;
		pt_levels++;
		addrwidth++;
		sagaw += 9;
		if (sagaw > 64)
			sagaw = 64;
	}

	if (i >= 5) {
		panic("vtd_create_domain: SAGAW 0x%x does not support AGAW %d",
		    tmp, agaw);
	}

	dom = malloc(sizeof(struct domain), M_VTD, M_ZERO | M_WAITOK);
	dom->pt_levels = pt_levels;
	dom->addrwidth = addrwidth;
	dom->id = domain_id();
	dom->maxaddr = maxaddr;
	dom->ptp = malloc(PAGE_SIZE, M_VTD, M_ZERO | M_WAITOK);
	if ((uintptr_t)dom->ptp & PAGE_MASK)
		panic("vtd_create_domain: ptp (%p) not page aligned", dom->ptp);

#ifdef notyet
	/*
	 * XXX superpage mappings for the iommu do not work correctly.
	 *
	 * By default all physical memory is mapped into the host_domain.
	 * When a VM is allocated wired memory the pages belonging to it
	 * are removed from the host_domain and added to the vm's domain.
	 *
	 * If the page being removed was mapped using a superpage mapping
	 * in the host_domain then we need to demote the mapping before
	 * removing the page.
	 *
	 * There is not any code to deal with the demotion at the moment
	 * so we disable superpage mappings altogether.
	 */
	dom->spsmask = ~0;
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		/* take most compatible value */
		dom->spsmask &= VTD_CAP_SPS(vtdmap->cap);
	}
#endif

	SLIST_INSERT_HEAD(&domhead, dom, next);

	return (dom);
}

static void
vtd_free_ptp(uint64_t *ptp, int level)
{
	int i;
	uint64_t *nlp;

	if (level > 1) {
		for (i = 0; i < 512; i++) {
			if ((ptp[i] & (VTD_PTE_RD | VTD_PTE_WR)) == 0)
				continue;
			if ((ptp[i] & VTD_PTE_SUPERPAGE) != 0)
				continue;
			nlp = (uint64_t *)PHYS_TO_DMAP(ptp[i] & VTD_PTE_ADDR_M);
			vtd_free_ptp(nlp, level - 1);
		}
	}

	bzero(ptp, PAGE_SIZE);
	free(ptp, M_VTD);
}

static void
vtd_destroy_domain(void *arg)
{
	struct domain *dom;

	dom = arg;

	SLIST_REMOVE(&domhead, dom, domain, next);
	vtd_free_ptp(dom->ptp, dom->pt_levels);
	free(dom, M_VTD);
}

const struct iommu_ops iommu_ops_intel = {
	.init = vtd_init,
	.cleanup = vtd_cleanup,
	.enable = vtd_enable,
	.disable = vtd_disable,
	.create_domain = vtd_create_domain,
	.destroy_domain = vtd_destroy_domain,
	.create_mapping = vtd_create_mapping,
	.remove_mapping = vtd_remove_mapping,
	.add_device = vtd_add_device,
	.remove_device = vtd_remove_device,
	.invalidate_tlb = vtd_invalidate_tlb,
};