/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __X86_IOMMU_INTEL_DMAR_H
#define	__X86_IOMMU_INTEL_DMAR_H

#include <dev/iommu/iommu.h>

struct dmar_unit;

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

/*
 * The domain abstraction.  Most non-constant members of the domain
 * are protected by the lock of the owning dmar unit, not by the
 * domain lock.  Most importantly, the dmar lock protects the list of
 * contexts.
 *
 * The domain lock protects the address map for the domain, and the
 * list of delayed unload entries.
 *
 * Page table pages and their content are protected by the lock of
 * the vm object pgtbl_obj, which contains the page table pages.
 */
struct dmar_domain {
	struct iommu_domain iodom;
	int domain;			/* (c) DID, written in context entry */
	int mgaw;			/* (c) Real max address width */
	int agaw;			/* (c) Adjusted guest address width */
	int pglvl;			/* (c) The pagelevel */
	int awlvl;			/* (c) The pagelevel as the bitmask,
					   to set in context entry */
	u_int ctx_cnt;			/* (u) Number of contexts owned */
	u_int refs;			/* (u) Refs, including ctx */
	struct dmar_unit *dmar;		/* (c) */
	LIST_ENTRY(dmar_domain) link;	/* (u) Member in the dmar list */
	LIST_HEAD(, dmar_ctx) contexts;	/* (u) */
	vm_object_t pgtbl_obj;		/* (c) Page table pages */
	u_int batch_no;
};

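/*
 * Illustrative sketch, not part of the driver: per the annotations
 * above, (u) members, including the contexts list, are accessed under
 * the owning unit's lock (the DMAR_LOCK() macro defined below), while
 * (c) members such as dmar are stable after initialization and need
 * no lock.  A hypothetical helper counting the contexts of a domain:
 *
 *	static int
 *	dmar_domain_count_ctxs(struct dmar_domain *domain)
 *	{
 *		struct dmar_ctx *ctx;
 *		int n;
 *
 *		DMAR_LOCK(domain->dmar);
 *		n = 0;
 *		LIST_FOREACH(ctx, &domain->contexts, link)
 *			n++;
 *		DMAR_UNLOCK(domain->dmar);
 *		return (n);
 *	}
 */
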
struct dmar_ctx {
	struct iommu_ctx context;
	uint16_t rid;			/* (c) pci RID */
	uint64_t last_fault_rec[2];	/* Last fault reported */
	LIST_ENTRY(dmar_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
};

#define	DMAR_DOMAIN_PGLOCK(dom)		VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGTRYLOCK(dom)	VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_PGUNLOCK(dom)	VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define	DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
	VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define	DMAR_DOMAIN_LOCK(dom)	mtx_lock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
#define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)

#define	DMAR2IOMMU(dmar)	(&((dmar)->iommu))
#define	IOMMU2DMAR(dmar)	\
	__containerof((dmar), struct dmar_unit, iommu)

#define	DOM2IODOM(domain)	(&((domain)->iodom))
#define	IODOM2DOM(domain)	\
	__containerof((domain), struct dmar_domain, iodom)

#define	CTX2IOCTX(ctx)		(&((ctx)->context))
#define	IOCTX2CTX(ctx)		\
	__containerof((ctx), struct dmar_ctx, context)

#define	CTX2DOM(ctx)		IODOM2DOM((ctx)->context.domain)
#define	CTX2DMAR(ctx)		(CTX2DOM(ctx)->dmar)
#define	DOM2DMAR(domain)	((domain)->dmar)

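/*
 * The conversion macros above are thin wrappers around __containerof()
 * and the embedded generic iommu structures.  A hypothetical snippet,
 * where 'ioctx' is assumed to be a struct iommu_ctx pointer handed out
 * by the generic iommu layer:
 *
 *	struct dmar_ctx *ctx = IOCTX2CTX(ioctx);
 *	struct dmar_domain *domain = CTX2DOM(ctx);
 *	struct dmar_unit *unit = DOM2DMAR(domain);
 */
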
struct dmar_msi_data {
	int irq;
	int irq_rid;
	struct resource *irq_res;
	void *intr_handle;
	int (*handler)(void *);
	int msi_data_reg;
	int msi_addr_reg;
	int msi_uaddr_reg;
	void (*enable_intr)(struct dmar_unit *);
	void (*disable_intr)(struct dmar_unit *);
	const char *name;
};

#define	DMAR_INTR_FAULT		0
#define	DMAR_INTR_QI		1
#define	DMAR_INTR_TOTAL		2

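/*
 * The DMAR_INTR_* constants above index the intrs[] array in struct
 * dmar_unit below; e.g., unit->intrs[DMAR_INTR_QI] holds the MSI state
 * for the invalidation queue completion interrupt.
 */
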
struct dmar_unit {
	struct iommu_unit iommu;
	device_t dev;
	uint16_t segment;
	uint64_t base;

	/* Resources */
	int reg_rid;
	struct resource *regs;

	struct dmar_msi_data intrs[DMAR_INTR_TOTAL];

	/* Hardware registers cache */
	uint32_t hw_ver;
	uint64_t hw_cap;
	uint64_t hw_ecap;
	uint32_t hw_gcmd;

	/* Data for being a dmar */
	LIST_HEAD(, dmar_domain) domains;
	struct unrhdr *domids;
	vm_object_t ctx_obj;
	u_int barrier_flags;

	/* Fault handler data */
	struct mtx fault_lock;
	uint64_t *fault_log;
	int fault_log_head;
	int fault_log_tail;
	int fault_log_size;
	struct task fault_task;
	struct taskqueue *fault_taskqueue;

	/* QI */
	int qi_enabled;
	vm_offset_t inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;
	volatile uint32_t inv_waitd_seq_hw; /* hw writes here on wait
					       descriptor completion */
	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/* IR */
	int ir_enabled;
	vm_paddr_t irt_phys;
	dmar_irte_t *irt;
	u_int irte_cnt;
	vmem_t *irtids;

	/* Delayed freeing of map entries queue processing */
	struct iommu_map_entries_tailq tlb_flush_entries;
	struct task qi_task;
	struct taskqueue *qi_taskqueue;
};

#define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->iommu.lock)
#define	DMAR_UNLOCK(dmar)	mtx_unlock(&(dmar)->iommu.lock)
#define	DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define	DMAR_FAULT_LOCK(dmar)	mtx_lock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_UNLOCK(dmar)	mtx_unlock_spin(&(dmar)->fault_lock)
#define	DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)

#define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
#define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
#define	DMAR_X2APIC(dmar) \
	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)

/* Barrier ids */
#define	DMAR_BARRIER_RMRR	0
#define	DMAR_BARRIER_USEQ	1

struct dmar_unit *dmar_find(device_t dev, bool verbose);
struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);

u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
    bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
    iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
int dmar_load_irt_ptr(struct dmar_unit *unit);
int dmar_enable_ir(struct dmar_unit *unit);
int dmar_disable_ir(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
uint64_t dmar_get_timeout(void);
void dmar_update_timeout(uint64_t newval);

int dmar_fault_intr(void *arg);
void dmar_enable_fault_intr(struct dmar_unit *unit);
void dmar_disable_fault_intr(struct dmar_unit *unit);
int dmar_init_fault_log(struct dmar_unit *unit);
void dmar_fini_fault_log(struct dmar_unit *unit);

int dmar_qi_intr(void *arg);
void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
    iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);
extern const struct iommu_domain_map_ops dmar_domain_map_ops;

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init);
int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
    int dev_busno, const void *dev_path, int dev_path_len,
    struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;

static inline uint32_t
dmar_read4(const struct dmar_unit *unit, int reg)
{

	return (bus_read_4(unit->regs, reg));
}

static inline uint64_t
dmar_read8(const struct dmar_unit *unit, int reg)
{
#ifdef __i386__
	uint32_t high, low;

	low = bus_read_4(unit->regs, reg);
	high = bus_read_4(unit->regs, reg + 4);
	return (low | ((uint64_t)high << 32));
#else
	return (bus_read_8(unit->regs, reg));
#endif
}

static inline void
dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)
{

	KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
	    (unit->hw_gcmd & DMAR_GCMD_TE),
	    ("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
	    unit->hw_gcmd, val));
	bus_write_4(unit->regs, reg, val);
}

static inline void
dmar_write8(const struct dmar_unit *unit, int reg, uint64_t val)
{

	KASSERT(reg != DMAR_GCMD_REG, ("8byte GCMD write"));
#ifdef __i386__
	uint32_t high, low;

	low = val;
	high = val >> 32;
	bus_write_4(unit->regs, reg, low);
	bus_write_4(unit->regs, reg + 4, high);
#else
	bus_write_8(unit->regs, reg, val);
#endif
}

/*
 * dmar_pte_store and dmar_pte_clear ensure that on i386, 32-bit writes
 * are issued in the correct order.  For store, the lower word,
 * containing the P or R and W bits, is set only after the high word
 * is written.  For clear, the P bit is cleared first, then the high
 * word is cleared.
 *
 * dmar_pte_update updates the pte.  For amd64, the update is atomic.
 * For i386, it first disables the entry by clearing the word
 * containing the P bit, and then defers to dmar_pte_store1().  A
 * locked cmpxchg8b is probably available on any machine having DMAR
 * support, but the interrupt translation table may be mapped uncached.
 */
static inline void
dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
{
#ifdef __i386__
	volatile uint32_t *p;
	uint32_t hi, lo;

	hi = val >> 32;
	lo = val;
	p = (volatile uint32_t *)dst;
	*(p + 1) = hi;
	*p = lo;
#else
	*dst = val;
#endif
}

static inline void
dmar_pte_store(volatile uint64_t *dst, uint64_t val)
{

	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
	    dst, (uintmax_t)*dst, (uintmax_t)val));
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_update(volatile uint64_t *dst, uint64_t val)
{

#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
#endif
	dmar_pte_store1(dst, val);
}

static inline void
dmar_pte_clear(volatile uint64_t *dst)
{
#ifdef __i386__
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	*p = 0;
	*(p + 1) = 0;
#else
	*dst = 0;
#endif
}

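/*
 * A sketch of the intended call pattern (illustrative only; it assumes
 * a dmar_pte_t 'pte' with a 64-bit 'pte' member, a vm_page_t 'm', and
 * the DMAR_PTE_R/DMAR_PTE_W bit definitions): install a leaf pte, make
 * it visible to the hardware, then later tear it down:
 *
 *	dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(m) | DMAR_PTE_R |
 *	    DMAR_PTE_W);
 *	dmar_flush_pte_to_ram(unit, pte);
 *	...
 *	dmar_pte_clear(&pte->pte);
 *	dmar_flush_pte_to_ram(unit, pte);
 */
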
extern struct timespec dmar_hw_timeout;

#define	DMAR_WAIT_UNTIL(cond)					\
{								\
	struct timespec last, curr;				\
	bool forever;						\
								\
	if (dmar_hw_timeout.tv_sec == 0 &&			\
	    dmar_hw_timeout.tv_nsec == 0) {			\
		forever = true;					\
	} else {						\
		forever = false;				\
		nanouptime(&curr);				\
		timespecadd(&curr, &dmar_hw_timeout, &last);	\
	}							\
	for (;;) {						\
		if (cond) {					\
			error = 0;				\
			break;					\
		}						\
		nanouptime(&curr);				\
		if (!forever && timespeccmp(&last, &curr, <)) {	\
			error = ETIMEDOUT;			\
			break;					\
		}						\
		cpu_spinwait();					\
	}							\
}

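/*
 * Note that DMAR_WAIT_UNTIL() assigns to a variable named 'error',
 * which the caller must declare in the enclosing scope.  An
 * illustrative sketch, modeled on the register wait loops in the
 * driver (the register and bit names are assumptions):
 *
 *	int error;
 *
 *	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) &
 *	    DMAR_GSTS_TES) != 0));
 *	if (error == ETIMEDOUT)
 *		device_printf(unit->dev, "timed out enabling translation\n");
 */
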
#ifdef INVARIANTS
#define	TD_PREP_PINNED_ASSERT						\
	int old_td_pinned;						\
	old_td_pinned = curthread->td_pinned
#define	TD_PINNED_ASSERT						\
	KASSERT(curthread->td_pinned == old_td_pinned,			\
	    ("pin count leak: %d %d %s:%d", curthread->td_pinned,	\
	    old_td_pinned, __FILE__, __LINE__))
#else
#define	TD_PREP_PINNED_ASSERT
#define	TD_PINNED_ASSERT
#endif

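/*
 * Intended use of the pin-leak asserts (illustrative): bracket a
 * region that temporarily pins curthread, verifying under INVARIANTS
 * that the pin count is balanced on exit:
 *
 *	TD_PREP_PINNED_ASSERT;
 *	sched_pin();
 *	...
 *	sched_unpin();
 *	TD_PINNED_ASSERT;
 */
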
#endif /* __X86_IOMMU_INTEL_DMAR_H */