/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key is a degenerate range with
 * the same base and limit, so the overlap-based comparison matches it
 * against the range that contains it.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;

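/*
 * Ranges are ordered by address; two ranges compare "equal" when they
 * overlap. This is what lets RB_INSERT() report conflicting registrations
 * and lets a degenerate base == end key find its containing range on lookup.
 */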
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		    "claims region already claimed for '%s'\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end,
		    new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}

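/*
 * Resolve 'paddr' to a registered range and invoke 'cb' on it: first try
 * the per-vCPU hint, then the primary tree, and finally the fallback tree.
 * Returns ESRCH if no registered range covers the address.
 */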
static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However, by registering the extended
	 * config space window as 'immutable', the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp", __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* base is already claimed */
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* Flush the per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

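/*
 * Called once at startup to size the per-vCPU hint array and initialize the
 * trees and the rwlock; must run before any range is registered or accessed.
 */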
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
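
/*
 * Usage sketch (illustrative only; the device name, handler, address and
 * size below are made up). A device model registers an MMIO window by
 * filling in a struct mem_range and calling register_mem(); the handler
 * signature mirrors the way 'mr->handler' is invoked above. See mem.h for
 * the authoritative field and flag definitions.
 *
 *	static int
 *	mydev_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
 *	    int size, uint64_t *val, void *arg1, long arg2)
 *	{
 *		if (dir == MEM_F_READ)
 *			*val = 0;
 *		return (0);
 *	}
 *
 *	struct mem_range mr = {
 *		.name = "mydev",
 *		.flags = MEM_F_RW,
 *		.handler = mydev_handler,
 *		.base = 0xd0000000,
 *		.size = 0x1000,
 *	};
 *	register_mem(&mr);
 */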