/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}

static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable)
		pthread_rwlock_unlock(&mmio_rwlock);

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable)
		pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
}

struct read_mem_args {
	uint64_t *rval;
	int size;
};

static int
read_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct read_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, MEM_F_READ, paddr, rma->size,
	    rma->rval, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct read_mem_args rma;

	rma.rval = rval;
	rma.size = size;
	return (access_memory(ctx, vcpu, gpa, read_mem_cb, &rma));
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush the per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
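
/*
 * Usage sketch (illustrative only, kept disabled in the same way as
 * mmio_rb_dump() above): a hypothetical device model fills in a
 * 'struct mem_range' and registers a small MMIO window with
 * register_mem().  The handler prototype below mirrors the call sites in
 * mem_read()/mem_write(); the handler name, base address, size and flag
 * combination are assumptions chosen for the example, so consult mem.h
 * for the authoritative definitions before reusing it.
 */
#if 0
static int
example_mmio_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{

	if (dir == MEM_F_READ)
		*val = 0;	/* registers read as zero in this sketch */

	/* Writes are accepted and ignored. */
	return (0);
}

static int
example_register_device(void)
{
	struct mem_range mr;

	mr.name = "example-device";		/* hypothetical device name */
	mr.base = 0xd0000000UL;			/* assumed guest-physical base */
	mr.size = 0x1000;			/* assumed 4KB window */
	mr.flags = MEM_F_READ | MEM_F_WRITE;	/* mutable, so it can be unregistered */
	mr.handler = example_mmio_handler;
	mr.arg1 = NULL;
	mr.arg2 = 0;

	/* Inserts into mmio_rb_root; see register_mem_int() for overlap handling. */
	return (register_mem(&mr));
}
#endif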