/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2020 Oxide Computer Company
 */

/*
 * Memory ranges are represented with an RB tree.  On insertion, the range
 * is checked for overlaps.  On lookup, the key has the same base and limit,
 * so it will match any registered range that contains that address.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
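/*
 * Lookup example: a point query for guest-physical address A uses a key
 * with mr_base == mr_end == A.  mmio_rb_range_compare() below returns 0
 * for any node whose [mr_base, mr_end] interval contains A, so RB_FIND()
 * lands directly on the enclosing range.  (Addresses in comments here
 * are illustrative only.)
 */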
/*
 * Per-vCPU cache.  Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}
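/*
 * Resolve 'paddr' to a registered range and invoke the callback on it.
 * The primary tree is consulted first, seeded by the per-vCPU hint;
 * the fallback tree is searched only if that fails.  Returns ESRCH if
 * no registered range covers 'paddr'.
 */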
static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called.  If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'.  However, by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr,
    struct mem_range *mr, void *arg)
{
	struct vm_mmio *mmio;
	int err = 0;

	mmio = arg;

	if (mmio->read != 0) {
		err = mem_read(ctx, vcpu, paddr, &mmio->data, mmio->bytes, mr);
	} else {
		err = mem_write(ctx, vcpu, paddr, mmio->data, mmio->bytes, mr);
	}

	return (err);
}

int
emulate_mem(struct vmctx *ctx, int vcpu, struct vm_mmio *mmio)
{
	return (access_memory(ctx, vcpu, mmio->gpa, emulate_mem_cb, mmio));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp",
		    __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
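#if 0
/*
 * Illustrative sketch only (never compiled): how a hypothetical device
 * model might register a 4KB MMIO window.  The handler signature and
 * MEM_F_* flags follow "mem.h"; the names 'sample_dev_handler' and
 * 'sample_dev_attach' and the base address are assumptions made for
 * this example.
 */
static int
sample_dev_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads return a dummy value */
	return (0);		/* writes are silently dropped */
}

static void
sample_dev_attach(void)
{
	struct mem_range mr;

	mr.name = "sample_dev";
	mr.base = 0xc0000000;		/* guest-physical base (example) */
	mr.size = 4096;			/* one page */
	mr.flags = MEM_F_RW;
	mr.handler = sample_dev_handler;
	mr.arg1 = NULL;
	mr.arg2 = 0;

	if (register_mem(&mr) != 0)
		warnx("sample_dev: MMIO range registration failed");
}
#endif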
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* Flush the per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
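/*
 * Note: init_mem() must run once, before any register_mem(),
 * register_mem_fallback(), or access path above, since it initializes
 * both RB trees and 'mmio_rwlock'.
 */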