/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <dev/iommu/iommu_types.h>

struct bus_dma_tag_common;
struct iommu_map_entry;
TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);

RB_HEAD(iommu_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

struct iommu_qi_genseq {
	u_int gen;
	uint32_t seq;
};

struct iommu_map_entry {
	iommu_gaddr_t start;
	iommu_gaddr_t end;
	iommu_gaddr_t first;		/* Least start in subtree */
	iommu_gaddr_t last;		/* Greatest end in subtree */
	iommu_gaddr_t free_down;	/* Max free space below the
					   current R/B tree node */
	u_int flags;
	union {
		TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* DMA map entries */
		struct iommu_map_entry *tlb_flush_next;
	};
	RB_ENTRY(iommu_map_entry) rb_entry;	/* Links for domain entries */
	struct iommu_domain *domain;
	struct iommu_qi_genseq gseq;
	struct spglist pgtbl_free;
};

struct iommu_unit {
	struct mtx lock;
	device_t dev;
	int unit;
	struct sysctl_ctx_list sysctl_ctx;

	int dma_enabled;

	/* Busdma delayed map load */
	struct task dmamap_load_task;
	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
	struct taskqueue *delayed_taskqueue;

	/*
	 * Bitmap of buses for which context must ignore slot:func,
	 * duplicating the page table pointer into all context table
	 * entries.  This is a client-controlled quirk to support some
	 * NTBs.
	 */
	uint32_t buswide_ctxs[(PCI_BUSMAX + 1) / NBBY / sizeof(uint32_t)];
};
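
/*
 * Illustrative sketch only, not part of this header's KPI: one plausible
 * way iommu_set_buswide_ctx() and iommu_is_buswide_ctx() (declared below)
 * can index the buswide_ctxs bitmap above, assuming one bit per PCI bus
 * number:
 *
 *	idx = busno / (NBBY * sizeof(uint32_t));
 *	bit = busno % (NBBY * sizeof(uint32_t));
 *	unit->buswide_ctxs[idx] |= 1U << bit;			(mark buswide)
 *	buswide = (unit->buswide_ctxs[idx] & (1U << bit)) != 0;	(test)
 *
 * Consult the iommu framework sources for the authoritative behavior.
 */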

struct iommu_domain_map_ops {
	int (*map)(struct iommu_domain *domain, struct iommu_map_entry *entry,
	    vm_page_t *ma, uint64_t pflags, int flags);
	int (*unmap)(struct iommu_domain *domain, struct iommu_map_entry *entry,
	    int flags);
};

/*
 * Locking annotations:
 * (u) - Protected by iommu unit lock
 * (d) - Protected by domain lock
 * (c) - Immutable after initialization
 */

struct iommu_domain {
	struct iommu_unit *iommu;	/* (c) */
	const struct iommu_domain_map_ops *ops;
	struct mtx lock;		/* (c) */
	struct task unload_task;	/* (c) */
	u_int entries_cnt;		/* (d) */
	struct iommu_map_entries_tailq unload_entries;	/* (d) Entries to
							   unload */
	struct iommu_gas_entries_tree rb_root;	/* (d) */
	struct iommu_map_entry *start_gap;	/* (d) */
	iommu_gaddr_t end;		/* (c) Highest address + 1 in
					   the guest AS */
	struct iommu_map_entry *first_place, *last_place;	/* (d) */
	struct iommu_map_entry *msi_entry;	/* (d) Arch-specific */
	iommu_gaddr_t msi_base;		/* (d) Arch-specific */
	vm_paddr_t msi_phys;		/* (d) Arch-specific */
	u_int flags;			/* (u) */
};

struct iommu_ctx {
	struct iommu_domain *domain;	/* (c) */
	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
	u_long loads;			/* atomic updates, for stat only */
	u_long unloads;			/* same */
	u_int flags;			/* (u) */
	uint16_t rid;			/* (c) pci RID */
};

/* struct iommu_ctx flags */
#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
					   last_fault_rec is valid */
#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
					   ephemeral reference is kept
					   to prevent context destruction */

#define	IOMMU_DOMAIN_GAS_INITED		0x0001
#define	IOMMU_DOMAIN_PGTBL_INITED	0x0002
#define	IOMMU_DOMAIN_IDMAP		0x0010	/* Domain uses identity
						   page table */
#define	IOMMU_DOMAIN_RMRR		0x0020	/* Domain contains RMRR entry,
						   cannot be turned off */

#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)

#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)
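
/*
 * Illustrative sketch only: how the locking annotations above are meant to
 * be honoured by callers.  Fields marked (d) are accessed with the domain
 * lock held, fields marked (u) with the owning unit's lock, and (c) fields
 * are written once during initialization and then read without locking:
 *
 *	IOMMU_DOMAIN_LOCK(domain);
 *	... modify (d) fields such as rb_root or unload_entries ...
 *	IOMMU_DOMAIN_UNLOCK(domain);
 *
 * Helpers that require the caller to hold a lock can check the invariant
 * with IOMMU_ASSERT_LOCKED() or IOMMU_DOMAIN_ASSERT_LOCKED().
 */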

void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
    uint16_t rid, bool id_mapped, bool rmrr_init);
struct iommu_unit *iommu_find(device_t dev, bool verbose);
void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep);
void iommu_domain_unload(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *entries, bool cansleep);

void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
    device_t dev, bool rmrr);
device_t iommu_get_requester(device_t dev, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);

void iommu_gas_init_domain(struct iommu_domain *domain);
void iommu_gas_fini_domain(struct iommu_domain *domain);
struct iommu_map_entry *iommu_gas_alloc_entry(struct iommu_domain *domain,
    u_int flags);
void iommu_gas_free_entry(struct iommu_map_entry *entry);
void iommu_gas_free_space(struct iommu_map_entry *entry);
void iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size);
int iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void iommu_gas_free_region(struct iommu_map_entry *entry);
int iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0);
int iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end);

void iommu_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool iommu_is_buswide_ctx(struct iommu_unit *unit, u_int busno);
void iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
    const struct iommu_domain_map_ops *ops);
void iommu_domain_fini(struct iommu_domain *domain);

bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bus_dma_tag_t iommu_get_dma_tag(device_t dev, device_t child);
struct iommu_ctx *iommu_get_dev_ctx(device_t dev);
struct iommu_domain *iommu_get_ctx_domain(struct iommu_ctx *ctx);

SYSCTL_DECL(_hw_iommu);

#endif /* !_DEV_IOMMU_IOMMU_H_ */
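
/*
 * Illustrative sketch only: a rough life cycle of a guest-address-space
 * mapping as typically driven by the IOMMU busdma backend.  The variable
 * names and the "eflags"/"mflags" values are placeholders; the entry and
 * mapping flags themselves are defined in the companion iommu headers.
 *
 *	struct iommu_map_entry *entry;
 *	struct iommu_map_entries_tailq entries;
 *
 *	error = iommu_gas_map(domain, tag_common, size, offset, eflags,
 *	    mflags, ma, &entry);
 *	...
 *	TAILQ_INIT(&entries);
 *	TAILQ_INSERT_TAIL(&entries, entry, dmamap_link);
 *	iommu_domain_unload(domain, &entries, true);
 */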