/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work was supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define dprintf(fmt, ...)
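
/*
 * Registry of IOMMU units in the system.  Each registered unit is kept on
 * iommu_list, which is protected by the iommu_sx lock (initialized by the
 * iommu_init() SYSINIT at the bottom of this file).
 */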
static struct sx iommu_sx;

struct iommu_entry {
        struct iommu_unit *iommu;
        LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
        struct iommu_unit *iommu;
        int error;

        iommu = iodom->iommu;

        error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

        return (error);
}

static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
        struct iommu_unit *iommu;
        vm_prot_t prot;
        vm_offset_t va;
        int error;

        dprintf("%s: base %lx, size %lx\n", __func__, base, size);

        prot = 0;
        if (eflags & IOMMU_MAP_ENTRY_READ)
                prot |= VM_PROT_READ;
        if (eflags & IOMMU_MAP_ENTRY_WRITE)
                prot |= VM_PROT_WRITE;

        va = base;

        iommu = iodom->iommu;

        error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

        return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
        .map = iommu_domain_map_buf,
        .unmap = iommu_domain_unmap_buf,
};

static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
        struct iommu_domain *iodom;

        iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
        if (iodom == NULL)
                return (NULL);

        iommu_domain_init(iommu, iodom, &domain_map_ops);
        iodom->end = VM_MAXUSER_ADDRESS;
        iodom->iommu = iommu;
        iommu_gas_init_domain(iodom);

        return (iodom);
}

static int
iommu_domain_free(struct iommu_domain *iodom)
{
        struct iommu_unit *iommu;

        iommu = iodom->iommu;

        IOMMU_LOCK(iommu);

        if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
                IOMMU_DOMAIN_LOCK(iodom);
                iommu_gas_fini_domain(iodom);
                IOMMU_DOMAIN_UNLOCK(iodom);
        }

        iommu_domain_fini(iodom);

        IOMMU_DOMAIN_FREE(iommu->dev, iodom);
        IOMMU_UNLOCK(iommu);

        return (0);
}

static void
iommu_tag_init(struct bus_dma_tag_iommu *t)
{
        bus_addr_t maxaddr;

        maxaddr = BUS_SPACE_MAXADDR;

        t->common.ref_count = 0;
        t->common.impl = &bus_dma_iommu_impl;
        t->common.alignment = 1;
        t->common.boundary = 0;
        t->common.lowaddr = maxaddr;
        t->common.highaddr = maxaddr;
        t->common.maxsize = maxaddr;
        t->common.nsegments = BUS_SPACE_UNRESTRICTED;
        t->common.maxsegsz = maxaddr;
}
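
/*
 * Context allocation.  A context represents a single DMA requester,
 * identified by its requester ID (rid).  In the current design each
 * context gets its own domain and its own busdma tag; see
 * iommu_get_ctx() below.
 */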
static struct iommu_ctx *
iommu_ctx_alloc(device_t dev, struct iommu_domain *iodom, bool disabled)
{
        struct iommu_unit *iommu;
        struct iommu_ctx *ioctx;

        iommu = iodom->iommu;

        ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, dev, disabled);
        if (ioctx == NULL)
                return (NULL);

        /*
         * The IOMMU can also be used for non-PCI devices.  This should be
         * reimplemented as a new newbus method, with pci_get_rid() as the
         * default for the PCI device class.
         */
        ioctx->rid = pci_get_rid(dev);

        return (ioctx);
}

struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
        struct iommu_ctx *ioctx;
        struct iommu_domain *iodom;
        struct bus_dma_tag_iommu *tag;

        IOMMU_LOCK(iommu);
        ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
        if (ioctx) {
                IOMMU_UNLOCK(iommu);
                return (ioctx);
        }
        IOMMU_UNLOCK(iommu);

        /*
         * In the current configuration there is one domain per ctx,
         * so allocate a domain first.
         */
        iodom = iommu_domain_alloc(iommu);
        if (iodom == NULL)
                return (NULL);

        ioctx = iommu_ctx_alloc(requester, iodom, disabled);
        if (ioctx == NULL) {
                iommu_domain_free(iodom);
                return (NULL);
        }

        tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
            M_IOMMU, M_WAITOK | M_ZERO);
        tag->owner = requester;
        tag->ctx = ioctx;
        tag->ctx->domain = iodom;

        iommu_tag_init(tag);

        ioctx->domain = iodom;

        return (ioctx);
}

void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
        struct bus_dma_tag_iommu *tag;

        IOMMU_ASSERT_LOCKED(iommu);

        tag = ioctx->tag;

        IOMMU_CTX_FREE(iommu->dev, ioctx);

        free(tag, M_IOMMU);
}

void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
        struct iommu_unit *iommu;
        struct iommu_domain *iodom;
        int error;

        iodom = ioctx->domain;
        iommu = iodom->iommu;

        IOMMU_LOCK(iommu);
        iommu_free_ctx_locked(iommu, ioctx);
        IOMMU_UNLOCK(iommu);

        /* Since there is one domain per ctx, remove the domain too. */
        error = iommu_domain_free(iodom);
        if (error)
                device_printf(iommu->dev, "Could not free a domain\n");
}

static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
        struct iommu_domain *iodom;

        iodom = entry->domain;

        IOMMU_DOMAIN_LOCK(iodom);
        iommu_gas_free_space(iodom, entry);
        IOMMU_DOMAIN_UNLOCK(iodom);

        if (free)
                iommu_gas_free_entry(iodom, entry);
        else
                entry->flags = 0;
}
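
/*
 * Unload a queue of map entries from a domain: each entry's range is
 * unmapped from the device page tables and its space returned to the
 * domain's GAS allocator.
 */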
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
        struct iommu_map_entry *entry, *entry1;
        int error __diagused;

        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", iodom, entry));
                error = iodom->ops->unmap(iodom, entry->start, entry->end -
                    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", iodom, error));
                TAILQ_REMOVE(entries, entry, dmamap_link);
                iommu_domain_free_entry(entry, true);
        }

        if (TAILQ_EMPTY(entries))
                return;

        panic("entries map is not empty");
}

int
iommu_register(struct iommu_unit *iommu)
{
        struct iommu_entry *entry;

        mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

        entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
        entry->iommu = iommu;

        IOMMU_LIST_LOCK();
        LIST_INSERT_HEAD(&iommu_list, entry, next);
        IOMMU_LIST_UNLOCK();

        iommu_init_busdma(iommu);

        return (0);
}

int
iommu_unregister(struct iommu_unit *iommu)
{
        struct iommu_entry *entry, *tmp;

        IOMMU_LIST_LOCK();
        LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
                if (entry->iommu == iommu) {
                        LIST_REMOVE(entry, next);
                        free(entry, M_IOMMU);
                }
        }
        IOMMU_LIST_UNLOCK();

        iommu_fini_busdma(iommu);

        mtx_destroy(&iommu->lock);

        return (0);
}

struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
        struct iommu_entry *entry;
        struct iommu_unit *iommu;
        int error;

        IOMMU_LIST_LOCK();
        LIST_FOREACH(entry, &iommu_list, next) {
                iommu = entry->iommu;
                error = IOMMU_FIND(iommu->dev, dev);
                if (error == 0) {
                        IOMMU_LIST_UNLOCK();
                        return (entry->iommu);
                }
        }
        IOMMU_LIST_UNLOCK();

        return (NULL);
}

void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{

        dprintf("%s\n", __func__);

        iommu_domain_free_entry(entry, free);
}

static void
iommu_init(void)
{

        sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);