/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __X86_IOMMU_AMD_IOMMU_H
#define	__X86_IOMMU_AMD_IOMMU_H

#include <dev/iommu/iommu.h>

#define	AMDIOMMU_DEV_REPORTED	0x00000001

struct amdiommu_unit;

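/*
 * Locking annotations used in the structures below, following the
 * convention of the Intel DMAR driver:
 * (u) - Protected by the owning unit's lock
 * (c) - Constant after initialization
 */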
struct amdiommu_domain {
	struct iommu_domain iodom;
	int domain;			/* (c) DID, written in context entry */
	struct amdiommu_unit *unit;	/* (c) */

	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	LIST_ENTRY(amdiommu_domain) link;/* (u) Member in the iommu list */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
	vm_page_t pgtblr;		/* (c) Page table root page */
	u_int pglvl;			/* (c) Page table levels */
};

struct amdiommu_ctx {
	struct iommu_ctx context;
	struct amdiommu_irte_basic_novapic *irtb;   /* Basic-format IRT */
	struct amdiommu_irte_basic_vapic_x2 *irtx2; /* x2APIC-format IRT */
	vmem_t *irtids;			/* Arena allocating IRTE slots */
};

struct amdiommu_unit {
	struct iommu_unit iommu;
	struct x86_unit_common x86c;
	u_int unit_dom;		/* Served PCI domain, from IVRS */
	u_int device_id;	/* basically PCI RID */
	u_int unit_id;		/* Hypertransport Unit ID, deprecated */
	TAILQ_ENTRY(amdiommu_unit) unit_next;
	int seccap_reg;
	uint64_t efr;
	vm_paddr_t mmio_base;
	vm_size_t mmio_sz;
	struct resource *mmio_res;
	int mmio_rid;
	uint64_t hw_ctrl;

	u_int numirqs;
	struct resource *msix_table;
	int msix_table_rid;
	int irq_cmdev_rid;
	struct resource *irq_cmdev;
	void *irq_cmdev_cookie;

	struct amdiommu_dte *dev_tbl;
	vm_object_t devtbl_obj;

	LIST_HEAD(, amdiommu_domain) domains;
	struct unrhdr *domids;

	struct mtx event_lock;
	struct amdiommu_event_generic *event_log;
	u_int event_log_size;
	u_int event_log_head;
	u_int event_log_tail;
	struct task event_task;
	struct taskqueue *event_taskqueue;
	struct amdiommu_event_generic event_copy_log[16];
	u_int event_copy_head;
	u_int event_copy_tail;

	int irte_enabled;	/* int for sysctl type */
	bool irte_x2apic;
	u_int irte_nentries;
};

#define	AMD2IOMMU(unit)		(&((unit)->iommu))
#define	IOMMU2AMD(unit)		\
	__containerof((unit), struct amdiommu_unit, iommu)

#define	AMDIOMMU_LOCK(unit)		mtx_lock(&AMD2IOMMU(unit)->lock)
#define	AMDIOMMU_UNLOCK(unit)		mtx_unlock(&AMD2IOMMU(unit)->lock)
#define	AMDIOMMU_ASSERT_LOCKED(unit)	mtx_assert(&AMD2IOMMU(unit)->lock, \
	MA_OWNED)

#define	AMDIOMMU_EVENT_LOCK(unit)	mtx_lock_spin(&(unit)->event_lock)
#define	AMDIOMMU_EVENT_UNLOCK(unit)	mtx_unlock_spin(&(unit)->event_lock)
#define	AMDIOMMU_EVENT_ASSERT_LOCKED(unit)	\
	mtx_assert(&(unit)->event_lock, MA_OWNED)
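
/*
 * Note (design as implied by the definitions above): event_lock is a
 * spin mutex, presumably because the hardware event log ring is drained
 * from an interrupt handler, which then hands the entries copied into
 * event_copy_log over to event_taskqueue for reporting.
 */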

#define	DOM2IODOM(domain)	(&((domain)->iodom))
#define	IODOM2DOM(domain)	\
	__containerof((domain), struct amdiommu_domain, iodom)

#define	CTX2IOCTX(ctx)		(&((ctx)->context))
#define	IOCTX2CTX(ctx)		\
	__containerof((ctx), struct amdiommu_ctx, context)

#define	CTX2DOM(ctx)		IODOM2DOM((ctx)->context.domain)
#define	CTX2AMD(ctx)		(CTX2DOM(ctx)->unit)
#define	DOM2AMD(domain)		((domain)->unit)
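
/*
 * Illustrative example (not part of the driver API): given a generic
 * struct iommu_ctx *ioctx handed out by the iommu framework, the
 * driver-private structures are recovered as
 *
 *	struct amdiommu_ctx *ctx = IOCTX2CTX(ioctx);
 *	struct amdiommu_domain *domain = CTX2DOM(ctx);
 *	struct amdiommu_unit *unit = CTX2AMD(ctx);
 *
 * using the __containerof()-based conversions above.
 */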

#define	AMDIOMMU_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->iodom.lock)
#define	AMDIOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
#define	AMDIOMMU_DOMAIN_ASSERT_LOCKED(dom) \
	mtx_assert(&(dom)->iodom.lock, MA_OWNED)

#define	AMDIOMMU_DOMAIN_PGLOCK(dom)	VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	AMDIOMMU_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	AMDIOMMU_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	AMDIOMMU_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	AMDIOMMU_RID	1001

static inline uint32_t
amdiommu_read4(const struct amdiommu_unit *unit, int reg)
{

	return (bus_read_4(unit->mmio_res, reg));
}

static inline uint64_t
amdiommu_read8(const struct amdiommu_unit *unit, int reg)
{
#ifdef __i386__
	uint32_t high, low;

	/*
	 * i386 bus space provides no 8-byte accessors, so read the
	 * register as two 32-bit halves; the combined access is not
	 * atomic.
	 */
	low = bus_read_4(unit->mmio_res, reg);
	high = bus_read_4(unit->mmio_res, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->mmio_res, reg));
#endif
}
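
/*
 * Example of intended use (AMDIOMMU_EFR_OFFSET is a hypothetical macro,
 * assumed to be provided by the register definitions header):
 *
 *	unit->efr = amdiommu_read8(unit, AMDIOMMU_EFR_OFFSET);
 *
 * would snapshot the 64-bit extended feature register from MMIO space.
 */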

static inline void
amdiommu_write4(const struct amdiommu_unit *unit, int reg, uint32_t val)
{
	bus_write_4(unit->mmio_res, reg, val);
}

static inline void
amdiommu_write8(const struct amdiommu_unit *unit, int reg, uint64_t val)
{
#ifdef __i386__
	uint32_t high, low;

	/* As in amdiommu_read8(), split the non-atomic 64-bit access. */
	low = val;
	high = val >> 32;
	bus_write_4(unit->mmio_res, reg, low);
	bus_write_4(unit->mmio_res, reg + 4, high);
#else
	bus_write_8(unit->mmio_res, reg, val);
#endif
}

int amdiommu_find_unit(device_t dev, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose);
int amdiommu_find_unit_for_ioapic(int apic_id, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose);
int amdiommu_find_unit_for_hpet(device_t hpet, struct amdiommu_unit **unitp,
    uint16_t *ridp, uint8_t *dtep, uint32_t *edtep, bool verbose);
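
/*
 * The dtep/edtep out-parameters of the lookup functions above presumably
 * return the per-device DTE settings (and extended settings) recorded in
 * the ACPI IVRS/IVHD entries, to be merged into the device table entry
 * when a context is created.
 */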

int amdiommu_init_cmd(struct amdiommu_unit *unit);
void amdiommu_fini_cmd(struct amdiommu_unit *unit);

void amdiommu_event_intr(struct amdiommu_unit *unit, uint64_t status);
int amdiommu_init_event(struct amdiommu_unit *unit);
void amdiommu_fini_event(struct amdiommu_unit *unit);

int amdiommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count);
int amdiommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
    u_int cookie, uint64_t *addr, uint32_t *data);
int amdiommu_unmap_msi_intr(device_t src, u_int cookie);
int amdiommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector,
    bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
    uint32_t *lo);
int amdiommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie);
int amdiommu_init_irt(struct amdiommu_unit *unit);
void amdiommu_fini_irt(struct amdiommu_unit *unit);
int amdiommu_ctx_init_irte(struct amdiommu_ctx *ctx);
void amdiommu_ctx_fini_irte(struct amdiommu_ctx *ctx);

void amdiommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep);
void amdiommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep);
struct amdiommu_ctx *amdiommu_get_ctx_for_dev(struct amdiommu_unit *unit,
    device_t dev, uint16_t rid, int dev_domain, bool id_mapped,
    bool rmrr_init, uint8_t dte, uint32_t edte);
struct iommu_ctx *amdiommu_get_ctx(struct iommu_unit *iommu, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct amdiommu_ctx *amdiommu_find_ctx_locked(struct amdiommu_unit *unit,
    uint16_t rid);
void amdiommu_free_ctx_locked_method(struct iommu_unit *iommu,
    struct iommu_ctx *context);
struct amdiommu_domain *amdiommu_find_domain(struct amdiommu_unit *unit,
    uint16_t rid);

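/*
 * Queued-invalidation helpers.  Judging by the naming convention, the
 * _nowait variants only enqueue the invalidation command, while the plain
 * variants also wait for completion; amdiommu_qi_invalidate_wait_sync()
 * drains previously queued requests.
 */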
void amdiommu_qi_invalidate_ctx_locked(struct amdiommu_ctx *ctx);
void amdiommu_qi_invalidate_ctx_locked_nowait(struct amdiommu_ctx *ctx);
void amdiommu_qi_invalidate_ir_locked(struct amdiommu_unit *unit,
    uint16_t devid);
void amdiommu_qi_invalidate_ir_locked_nowait(struct amdiommu_unit *unit,
    uint16_t devid);
void amdiommu_qi_invalidate_all_pages_locked_nowait(
    struct amdiommu_domain *domain);
void amdiommu_qi_invalidate_wait_sync(struct iommu_unit *iommu);

int amdiommu_domain_alloc_pgtbl(struct amdiommu_domain *domain);
void amdiommu_domain_free_pgtbl(struct amdiommu_domain *domain);
extern const struct iommu_domain_map_ops amdiommu_domain_map_ops;

#endif	/* __X86_IOMMU_AMD_IOMMU_H */