/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree.  On insertion, the range
 * is checked for overlaps with already registered ranges.  On lookup, the
 * key has the same base and limit, so the comparator matches any range
 * that contains that address.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache.  Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

/*
 * Emulate the MMIO access at guest physical address 'paddr' by dispatching
 * to the handler of the registered range that contains it.
 */
int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie)
{
	struct mmio_rb_range *entry;
	int err;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);
	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie,
				      mem_read, mem_write, &entry->mr_param);
	pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}

/*
 * Register the range described by 'memp' in the given tree, rejecting
 * any overlap with an already registered range.
 */
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* a range covering 'base' already exists */
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
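/*
 * Example (not compiled in, following the same #if 0 convention as
 * mmio_rb_dump() above): a minimal sketch of how a device model might use
 * this module.  It assumes only the 'struct mem_range' fields referenced in
 * this file (name, base, size, handler, arg1, arg2) and a handler signature
 * inferred from the mem_read()/mem_write() call sites; the device name, the
 * MMIO base/size and the 'mydev_*' identifiers are hypothetical.
 */
#if 0
static int
mydev_mmio_handler(void *ctx, int vcpu, int dir, uint64_t addr, int size,
		   uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* read: return the device register's value */
	/* for MEM_F_WRITE, '*val' holds the value being written */
	return (0);
}

static int
mydev_attach(void)
{
	struct mem_range mr = { 0 };	/* zero any fields not set below */

	mr.name = "mydev";
	mr.base = 0xd0000000UL;		/* hypothetical MMIO window */
	mr.size = 0x1000;
	mr.handler = mydev_mmio_handler;
	mr.arg1 = NULL;
	mr.arg2 = 0;

	/*
	 * register_mem() places the range in the primary tree; a catch-all
	 * handler would use register_mem_fallback() instead, which
	 * emulate_mem() only consults when the primary lookup misses.
	 * A non-zero return means the range overlaps an existing one
	 * (EEXIST) or allocation failed (ENOMEM).
	 */
	return (register_mem(&mr));
}
#endif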