xref: /freebsd/sys/x86/iommu/x86_iommu.h (revision a64729f5077d77e13b9497cb33ecb3c82e606ee8)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2015, 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __X86_IOMMU_X86_IOMMU_H
#define	__X86_IOMMU_X86_IOMMU_H

/* Intel and AMD IOMMUs both use 64-bit page table entries. */
typedef struct iommu_pte {
	uint64_t pte;
} iommu_pte_t;

#define	IOMMU_PAGE_SIZE		PAGE_SIZE
#define	IOMMU_PAGE_MASK		(IOMMU_PAGE_SIZE - 1)
#define	IOMMU_PAGE_SHIFT	PAGE_SHIFT
#define	IOMMU_NPTEPG		(IOMMU_PAGE_SIZE / sizeof(iommu_pte_t))
#define	IOMMU_NPTEPGSHIFT	9
#define	IOMMU_PTEMASK		(IOMMU_NPTEPG - 1)

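/*
 * Illustrative sketch, not part of the original header: with 4 KB pages
 * and 8-byte PTEs, IOMMU_NPTEPG is 512, IOMMU_NPTEPGSHIFT is 9, and
 * IOMMU_PTEMASK selects one of the 512 slots in a page-table page.  The
 * helper name below is hypothetical and only demonstrates the macros.
 */
static inline u_int
iommu_pte_slot_sketch(iommu_gaddr_t addr)
{

	/* Index of the PTE covering addr within a leaf page-table page. */
	return ((u_int)((addr >> IOMMU_PAGE_SHIFT) & IOMMU_PTEMASK));
}
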
struct sf_buf;
struct vm_object;

struct vm_page *iommu_pgalloc(struct vm_object *obj, vm_pindex_t idx,
    int flags);
void iommu_pgfree(struct vm_object *obj, vm_pindex_t idx, int flags,
    struct iommu_map_entry *entry);
void *iommu_map_pgtbl(struct vm_object *obj, vm_pindex_t idx, int flags,
    struct sf_buf **sf);
void iommu_unmap_pgtbl(struct sf_buf *sf);
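
/*
 * Illustrative usage sketch, not part of the original header: page-table
 * pages live in a VM object and are addressed by their pindex.  A backend
 * typically allocates a page, maps it through an sf_buf to edit PTEs, and
 * unmaps it when done.  The IOMMU_PGF_* flag names below are assumptions
 * borrowed from the Intel DMAR code and may differ.
 *
 *	struct sf_buf *sf;
 *	iommu_pte_t *ptep;
 *
 *	(void)iommu_pgalloc(obj, idx, IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
 *	ptep = iommu_map_pgtbl(obj, idx, IOMMU_PGF_WAITOK, &sf);
 *	ptep[slot].pte = paddr | prot_bits;
 *	iommu_unmap_pgtbl(sf);
 */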

extern iommu_haddr_t iommu_high;
extern int iommu_tbl_pagecnt;
extern int iommu_qi_batch_coalesce;

SYSCTL_DECL(_hw_iommu);

struct x86_unit_common;

struct x86_iommu {
	struct x86_unit_common *(*get_x86_common)(struct iommu_unit *iommu);
	void (*unit_pre_instantiate_ctx)(struct iommu_unit *iommu);
	void (*qi_ensure)(struct iommu_unit *unit, int descr_count);
	void (*qi_emit_wait_descr)(struct iommu_unit *unit, uint32_t seq,
	    bool, bool, bool);
	void (*qi_advance_tail)(struct iommu_unit *unit);
	void (*qi_invalidate_emit)(struct iommu_domain *idomain,
	    iommu_gaddr_t base, iommu_gaddr_t size,
	    struct iommu_qi_genseq *pseq, bool emit_wait);
	void (*domain_unload_entry)(struct iommu_map_entry *entry, bool free,
	    bool cansleep);
	void (*domain_unload)(struct iommu_domain *iodom,
	    struct iommu_map_entries_tailq *entries, bool cansleep);
	struct iommu_ctx *(*get_ctx)(struct iommu_unit *iommu,
	    device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init);
	void (*free_ctx_locked)(struct iommu_unit *iommu,
	    struct iommu_ctx *context);
	void (*free_ctx)(struct iommu_ctx *context);
	struct iommu_unit *(*find)(device_t dev, bool verbose);
	int (*alloc_msi_intr)(device_t src, u_int *cookies, u_int count);
	int (*map_msi_intr)(device_t src, u_int cpu, u_int vector,
	    u_int cookie, uint64_t *addr, uint32_t *data);
	int (*unmap_msi_intr)(device_t src, u_int cookie);
	int (*map_ioapic_intr)(u_int ioapic_id, u_int cpu, u_int vector,
	    bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
	    uint32_t *lo);
	int (*unmap_ioapic_intr)(u_int ioapic_id, u_int *cookie);
};
void set_x86_iommu(struct x86_iommu *);
struct x86_iommu *get_x86_iommu(void);
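
/*
 * Illustrative sketch, not part of the original header: a hardware backend
 * (Intel DMAR or AMD-Vi) fills a struct x86_iommu with its method pointers
 * and registers it once at attach time; the shared x86 code then dispatches
 * through get_x86_iommu().  The backend_* names are hypothetical.
 *
 *	static struct x86_iommu backend_x86_iommu = {
 *		.get_x86_common = backend_get_x86_common,
 *		.find = backend_find,
 *		.get_ctx = backend_get_ctx,
 *		.free_ctx_locked = backend_free_ctx_locked,
 *		.domain_unload_entry = backend_domain_unload_entry,
 *		.domain_unload = backend_domain_unload,
 *		.alloc_msi_intr = backend_alloc_msi_intr,
 *	};
 *
 *	set_x86_iommu(&backend_x86_iommu);
 */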

struct iommu_msi_data {
	int irq;
	int irq_rid;
	struct resource *irq_res;
	void *intr_handle;
	int (*handler)(void *);
	int msi_data_reg;
	int msi_addr_reg;
	int msi_uaddr_reg;
	uint64_t msi_addr;
	uint32_t msi_data;
	void (*enable_intr)(struct iommu_unit *);
	void (*disable_intr)(struct iommu_unit *);
	const char *name;
};

#define	IOMMU_MAX_MSI	3

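/*
 * Illustrative sketch, not part of the original header: a backend describes
 * up to IOMMU_MAX_MSI interrupt sources by filling one iommu_msi_data slot
 * with its handler, MSI register offsets, and enable/disable callbacks,
 * then has the shared code allocate and program the interrupt.  The slot
 * index, register names, and backend_* names are hypothetical.
 *
 *	struct iommu_msi_data *md = &common->intrs[0];
 *
 *	md->name = "qi";
 *	md->irq_rid = 0;
 *	md->handler = backend_qi_intr;
 *	md->msi_data_reg = BACKEND_QI_MSI_DATA_REG;
 *	md->msi_addr_reg = BACKEND_QI_MSI_ADDR_REG;
 *	md->msi_uaddr_reg = BACKEND_QI_MSI_UADDR_REG;
 *	md->enable_intr = backend_enable_qi_intr;
 *	md->disable_intr = backend_disable_qi_intr;
 *	error = iommu_alloc_irq(unit, 0);
 */
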
struct x86_unit_common {
	uint32_t qi_buf_maxsz;
	uint32_t qi_cmd_sz;

	char *inv_queue;
	vm_size_t inv_queue_size;
	uint32_t inv_queue_avail;
	uint32_t inv_queue_tail;

	/*
	 * Hardware writes here on completion of wait descriptor
	 * processing.  Intel writes 4 bytes, while AMD writes 8.
	 * Because the value is little-endian and sequence numbers are
	 * 4 bytes, the difference does not matter to us.
	 */
	volatile uint64_t inv_waitd_seq_hw;

	uint64_t inv_waitd_seq_hw_phys;
	uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
	u_int inv_waitd_gen;	/* seq number generation AKA seq overflows */
	u_int inv_seq_waiters;	/* count of waiters for seq */
	u_int inv_queue_full;	/* informational counter */

	/*
	 * Delayed freeing of map entries queue processing:
	 *
	 * tlb_flush_head and tlb_flush_tail are used to implement a FIFO
	 * queue that supports concurrent dequeues and enqueues.  However,
	 * there can only be a single dequeuer (accessing tlb_flush_head) and
	 * a single enqueuer (accessing tlb_flush_tail) at a time.  Since the
	 * unit's qi_task is the only dequeuer, it can access tlb_flush_head
	 * without any locking.  In contrast, there may be multiple enqueuers,
	 * so the enqueuers acquire the iommu unit lock to serialize their
	 * accesses to tlb_flush_tail.
	 *
	 * In this FIFO queue implementation, the key to enabling concurrent
	 * dequeues and enqueues is that the dequeuer never needs to access
	 * tlb_flush_tail and the enqueuer never needs to access
	 * tlb_flush_head.  In particular, tlb_flush_head and tlb_flush_tail
	 * are never NULL, so neither a dequeuer nor an enqueuer ever needs to
	 * update both.  Instead, tlb_flush_head always points to a "zombie"
	 * struct, which previously held the last dequeued item.  Thus, the
	 * zombie's next field actually points to the struct holding the first
	 * item in the queue.  When an item is dequeued, the current zombie is
	 * finally freed, and the struct that held the just dequeued item
	 * becomes the new zombie.  When the queue is empty, tlb_flush_tail
	 * also points to the zombie.  A sketch of this discipline follows
	 * the structure definition below.
	 */
	struct iommu_map_entry *tlb_flush_head;
	struct iommu_map_entry *tlb_flush_tail;
	struct task qi_task;
	struct taskqueue *qi_taskqueue;

	struct iommu_msi_data intrs[IOMMU_MAX_MSI];
};
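
/*
 * Illustrative sketch, not part of the original header, of the tlb_flush
 * FIFO discipline described above.  It assumes a singly-linked "next"
 * pointer inside struct iommu_map_entry (spelled tlb_flush_next here) and
 * elides the casts around the pointer atomics; see the queued-invalidation
 * code for the real implementation.
 *
 *	Enqueue (multiple producers, serialized by the unit lock):
 *		entry->tlb_flush_next = NULL;
 *		atomic_store_rel_ptr(&common->tlb_flush_tail->tlb_flush_next,
 *		    entry);
 *		common->tlb_flush_tail = entry;
 *
 *	Dequeue (single consumer, the qi_task, no lock needed):
 *		zombie = common->tlb_flush_head;
 *		entry = atomic_load_acq_ptr(&zombie->tlb_flush_next);
 *		if (entry == NULL)
 *			the queue is empty, nothing to do;
 *		common->tlb_flush_head = entry;
 *		free the old zombie map entry; entry is the new zombie
 */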

void iommu_domain_free_entry(struct iommu_map_entry *entry, bool free);

void iommu_qi_emit_wait_seq(struct iommu_unit *unit,
    struct iommu_qi_genseq *pseq, bool emit_wait);
void iommu_qi_wait_for_seq(struct iommu_unit *unit,
    const struct iommu_qi_genseq *gseq, bool nowait);
void iommu_qi_drain_tlb_flush(struct iommu_unit *unit);
void iommu_qi_invalidate_locked(struct iommu_domain *domain,
    struct iommu_map_entry *entry, bool emit_wait);
void iommu_qi_invalidate_sync(struct iommu_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, bool cansleep);
void iommu_qi_common_init(struct iommu_unit *unit, task_fn_t taskfunc);
void iommu_qi_common_fini(struct iommu_unit *unit,
    void (*disable_qi)(struct iommu_unit *));
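
/*
 * Illustrative flow sketch, not part of the original header: a synchronous
 * IOTLB invalidation through the queued-invalidation helpers emits the
 * invalidation descriptors plus a wait descriptor, advances the queue tail
 * so the hardware sees them, and then waits until the hardware stores the
 * wait sequence number into inv_waitd_seq_hw.  Locking is shown coarsely
 * with the dev/iommu unit lock macros; details vary per backend.
 *
 *	struct iommu_qi_genseq gseq;
 *
 *	IOMMU_LOCK(unit);
 *	get_x86_iommu()->qi_invalidate_emit(domain, base, size, &gseq, true);
 *	get_x86_iommu()->qi_advance_tail(unit);
 *	iommu_qi_wait_for_seq(unit, &gseq, !cansleep);
 *	IOMMU_UNLOCK(unit);
 */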

int iommu_alloc_irq(struct iommu_unit *unit, int idx);
void iommu_release_intr(struct iommu_unit *unit, int idx);

void iommu_device_tag_init(struct iommu_ctx *ctx, device_t dev);

int pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl);
vm_pindex_t pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl);
vm_pindex_t pglvl_max_pages(int pglvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
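
/*
 * Worked example, not part of the original header: with the conventional
 * x86 radix layout each level resolves IOMMU_NPTEPGSHIFT address bits, so a
 * PTE at level 'lvl' of a 'total_pglvl'-level table covers
 * IOMMU_PAGE_SIZE << ((total_pglvl - lvl - 1) * IOMMU_NPTEPGSHIFT) bytes of
 * guest address space.  For a 4-level table that is 4K at the leaf level
 * (pglvl_page_size(4, 3)), 2M one level up, 1G the next, and 512G at the
 * root.
 */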

void iommu_db_print_domain_entry(const struct iommu_map_entry *entry);
void iommu_db_print_ctx(struct iommu_ctx *ctx);
void iommu_db_domain_print_contexts(struct iommu_domain *iodom);
void iommu_db_domain_print_mappings(struct iommu_domain *iodom);

#endif