xref: /freebsd/sys/dev/iommu/iommu.h (revision a64729f5077d77e13b9497cb33ecb3c82e606ee8)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_

#include <sys/_task.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <dev/iommu/iommu_types.h>
#include <dev/pci/pcireg.h>

struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);

RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

struct iommu_qi_genseq {
	u_int gen;
	uint32_t seq;
};

struct iommu_map_entry {
	iommu_gaddr_t start;
	iommu_gaddr_t end;
	iommu_gaddr_t first;		/* Least start in subtree */
	iommu_gaddr_t last;		/* Greatest end in subtree */
	iommu_gaddr_t free_down;	/* Max free space below the
					   current R/B tree node */
	u_int flags;
	union {
		TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* DMA map entries */
		struct iommu_map_entry *tlb_flush_next;
	};
	RB_ENTRY(iommu_map_entry) rb_entry;	 /* Links for domain entries */
	struct iommu_domain *domain;
	struct iommu_qi_genseq gseq;
	struct spglist pgtbl_free;
};
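
/*
 * Illustrative note (a sketch, not taken from iommu_gas.c): first, last
 * and free_down above are interval-tree style augmentation data.  After
 * an insertion, removal or rotation, the values for a node would be
 * rebuilt from its children roughly as
 *
 *	first = (left != NULL) ? left->first : start;
 *	last = (right != NULL) ? right->last : end;
 *	free_down = max of the children's free_down values and of the
 *	    gaps between this node and each child subtree;
 *
 * which lets the address-space allocator skip whole subtrees that
 * cannot contain a large enough hole.
 */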

struct iommu_unit {
	struct mtx lock;
	device_t dev;
	int unit;
	struct sysctl_ctx_list sysctl_ctx;

	int dma_enabled;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};
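
/*
 * Illustrative sketch only (an assumption, not copied from iommu.c):
 * with the layout above, bus busno is marked bus-wide when its bit is
 * set, e.g.
 *
 *	(unit->buswide_ctxs[busno / (NBBY * sizeof(uint32_t))] &
 *	    (1U << (busno % (NBBY * sizeof(uint32_t))))) != 0
 *
 * The iommu_set_buswide_ctx()/iommu_is_buswide_ctx() helpers declared
 * below set and test this bitmap.
 */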

struct iommu_domain_map_ops {
	int (*map)(struct iommu_domain *domain, struct iommu_map_entry *entry,
	    vm_page_t *ma, uint64_t pflags, int flags);
	int (*unmap)(struct iommu_domain *domain, struct iommu_map_entry *entry,
	    int flags);
};
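
/*
 * Hypothetical example (names invented for illustration): an IOMMU
 * backend supplies its page-table map/unmap callbacks through this
 * table and hands it to iommu_domain_init(), e.g.
 *
 *	static const struct iommu_domain_map_ops myiommu_domain_map_ops = {
 *		.map = myiommu_map_buf,
 *		.unmap = myiommu_unmap_buf,
 *	};
 *
 *	iommu_domain_init(unit, &mydomain->iodom, &myiommu_domain_map_ops);
 */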

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

struct iommu_domain {
	struct iommu_unit *iommu;	/* (c) */
	const struct iommu_domain_map_ops *ops;
	struct mtx lock;		/* (c) */
	struct task unload_task;	/* (c) */
	u_int entries_cnt;		/* (d) */
	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
							 unload */
	struct iommu_gas_entries_tree rb_root; /* (d) */
	struct iommu_map_entry *start_gap;     /* (d) */
	iommu_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	struct iommu_map_entry *first_place, *last_place; /* (d) */
	struct iommu_map_entry *msi_entry; /* (d) Arch-specific */
	iommu_gaddr_t msi_base;		/* (d) Arch-specific */
	vm_paddr_t msi_phys;		/* (d) Arch-specific */
	u_int flags;			/* (u) */
	LIST_HEAD(, iommu_ctx) contexts;/* (u) */
};

struct iommu_ctx {
	struct iommu_domain *domain;	/* (c) */
	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
	LIST_ENTRY(iommu_ctx) link;	/* (u) Member in the domain list */
	u_int refs;			/* (u) References from tags */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
	u_int flags;			/* (u) */
	uint16_t rid;			/* (c) pci RID */
};

/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

#define	IOMMU_DOMAIN_GAS_INITED		0x0001
#define	IOMMU_DOMAIN_PGTBL_INITED	0x0002
#define	IOMMU_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	IOMMU_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)
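
/*
 * Illustrative only: per the locking annotations above, (d) fields such
 * as rb_root must be accessed with the domain lock held, e.g.
 *
 *	IOMMU_DOMAIN_LOCK(domain);
 *	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
 *	IOMMU_DOMAIN_UNLOCK(domain);
 *
 * while (u) fields are protected by IOMMU_LOCK()/IOMMU_UNLOCK() on the
 * owning iommu_unit.
 */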

void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep);
void iommu_domain_unload(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);

void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
    device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);

void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
    u_int flags);
void iommu_gas_free_entry(struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_map_entry *entry);
void iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size);
int iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0);
int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end);
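
/*
 * Illustrative sketch (msi_size is a placeholder): a backend that needs
 * to keep a guest-address range out of the allocator, e.g. an MSI
 * window, could reserve it during domain setup:
 *
 *	struct iommu_map_entry *res;
 *
 *	error = iommu_gas_reserve_region(domain, msi_base,
 *	    msi_base + msi_size, &res);
 */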

void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
    const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);

bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);
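
/*
 * Illustrative driver-side use of bus_dma_iommu_load_ident() (a sketch;
 * bar_phys and bar_size are placeholders): a driver that needs a 1:1
 * mapping of a physical window through the IOMMU would do roughly
 *
 *	error = bus_dmamap_create(dmat, 0, &map);
 *	if (error == 0)
 *		error = bus_dma_iommu_load_ident(dmat, map, bar_phys,
 *		    bar_size, BUS_DMA_NOWAIT);
 */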

bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);

SYSCTL_DECL(_hw_iommu);

#endif /* !_DEV_IOMMU_IOMMU_H_ */