/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _DEV_IOMMU_IOMMU_H_
#define	_DEV_IOMMU_IOMMU_H_

#include <dev/iommu/iommu_types.h>

struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);

RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

struct iommu_qi_genseq {
	u_int gen;
	uint32_t seq;
};

struct iommu_map_entry {
	iommu_gaddr_t start;
	iommu_gaddr_t end;
	iommu_gaddr_t first;		/* Least start in subtree */
	iommu_gaddr_t last;		/* Greatest end in subtree */
	iommu_gaddr_t free_down;	/* Max free space below the
					   current R/B tree node */
	u_int flags;
	TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
	RB_ENTRY(iommu_map_entry) rb_entry;	/* Links for domain entries */
	TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
						     dmamap_load failure */
	struct iommu_domain *domain;
	struct iommu_qi_genseq gseq;
};

#define	IOMMU_MAP_ENTRY_PLACE	0x0001	/* Fake entry */
#define	IOMMU_MAP_ENTRY_RMRR	0x0002	/* Permanent, not linked by
					   dmamap_link */
#define	IOMMU_MAP_ENTRY_MAP	0x0004	/* Busdma created, linked by
					   dmamap_link */
#define	IOMMU_MAP_ENTRY_UNMAPPED 0x0010	/* No backing pages */
#define	IOMMU_MAP_ENTRY_QI_NF	0x0020	/* qi task, do not free entry */
#define	IOMMU_MAP_ENTRY_READ	0x1000	/* Read permitted */
#define	IOMMU_MAP_ENTRY_WRITE	0x2000	/* Write permitted */
#define	IOMMU_MAP_ENTRY_SNOOP	0x4000	/* Snoop */
#define	IOMMU_MAP_ENTRY_TM	0x8000	/* Transient */

struct iommu_unit {
	struct mtx lock;
	int unit;

	int dma_enabled;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};
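/*
 * Illustrative sketch (not part of the original header): how a driver for a
 * device that needs the bus-wide quirk described above, e.g. some NTB
 * hardware, might request it.  The driver name and attach routine are
 * hypothetical; bus_dma_iommu_set_buswide() and iommu_is_buswide_ctx() are
 * the declarations found later in this header.
 *
 *	static int
 *	xyz_ntb_attach(device_t dev)
 *	{
 *		// Ask the IOMMU layer to ignore slot:func and share one
 *		// context across the device's whole PCI bus.
 *		if (!bus_dma_iommu_set_buswide(dev))
 *			device_printf(dev, "bus-wide context not enabled\n");
 *		return (0);
 *	}
 */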
struct iommu_domain_map_ops {
	int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
	int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
	    iommu_gaddr_t size, int flags);
};

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

struct iommu_domain {
	struct iommu_unit *iommu;	/* (c) */
	const struct iommu_domain_map_ops *ops;
	struct mtx lock;		/* (c) */
	struct task unload_task;	/* (c) */
	u_int entries_cnt;		/* (d) */
	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
							  unload */
	struct iommu_gas_entries_tree rb_root; /* (d) */
	iommu_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	struct iommu_map_entry *first_place, *last_place; /* (d) */
	u_int flags;			/* (u) */
};

struct iommu_ctx {
	struct iommu_domain *domain;	/* (c) */
	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
	u_int flags;			/* (u) */
	uint16_t rid;			/* (c) pci RID */
};

/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

#define	IOMMU_DOMAIN_GAS_INITED		0x0001
#define	IOMMU_DOMAIN_PGTBL_INITED	0x0002
#define	IOMMU_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	IOMMU_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

/* Map flags */
#define	IOMMU_MF_CANWAIT	0x0001
#define	IOMMU_MF_CANSPLIT	0x0002
#define	IOMMU_MF_RMRR		0x0004

#define	IOMMU_PGF_WAITOK	0x0001
#define	IOMMU_PGF_ZERO		0x0002
#define	IOMMU_PGF_ALLOC		0x0004
#define	IOMMU_PGF_NOALLOC	0x0008
#define	IOMMU_PGF_OBJL		0x0010

#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)

static inline bool
iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
    iommu_gaddr_t boundary)
{

	if (boundary == 0)
		return (true);
	return (start + size <= ((start + boundary) & ~(boundary - 1)));
}
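/*
 * Worked example for iommu_test_boundary() above (added for illustration,
 * not part of the original header).  The expression computes the first
 * boundary crossing after start, where boundary is a power of two as in
 * busdma usage, and checks that [start, start + size) ends at or before it:
 *
 *	boundary = 0x1000, start = 0xf00, size = 0x100:
 *		next crossing = (0xf00 + 0x1000) & ~0xfff = 0x1000,
 *		0xf00 + 0x100 = 0x1000 <= 0x1000  ->  true (fits)
 *	boundary = 0x1000, start = 0xf00, size = 0x200:
 *		0xf00 + 0x200 = 0x1100 >  0x1000  ->  false (crosses)
 *
 * A boundary of 0 means "no restriction" and always succeeds.
 */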
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void iommu_domain_unload(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);

struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
    device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
struct iommu_map_entry *iommu_map_alloc_entry(struct iommu_domain *iodom,
    u_int flags);
void iommu_map_free_entry(struct iommu_domain *, struct iommu_map_entry *);
int iommu_map(struct iommu_domain *iodom,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
int iommu_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);

void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
    u_int flags);
void iommu_gas_free_entry(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
int iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end);

void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
    const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);

bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);

SYSCTL_DECL(_hw_iommu);

#endif /* !_DEV_IOMMU_IOMMU_H_ */
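/*
 * Illustrative sketch of the lookup and identity-load helpers declared above
 * (added for illustration, not part of the original header).  The function
 * name and the PHYS_BASE/PHYS_LEN window are hypothetical; iommu_get_dev_ctx()
 * and bus_dma_iommu_load_ident() are the declarations from this header.
 *
 *	// Preload a fixed physical window 1:1 into the device's IOMMU
 *	// domain so that DMA addresses equal physical addresses there.
 *	static int
 *	xyz_preload_ident(device_t dev, bus_dma_tag_t dmat, bus_dmamap_t map)
 *	{
 *		struct iommu_ctx *ctx;
 *
 *		ctx = iommu_get_dev_ctx(dev);
 *		if (ctx == NULL)
 *			return (ENXIO);	// device is not behind an IOMMU
 *		return (bus_dma_iommu_load_ident(dmat, map, PHYS_BASE,
 *		    PHYS_LEN, BUS_DMA_WAITOK));
 *	}
 */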