/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it matches any registered range that contains the address.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}
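/*
 * Example: with a registered range [0xd0000000, 0xd0000fff] (addresses
 * chosen purely for illustration), a lookup key built with
 * mr_base == mr_end == 0xd0000800 compares equal to that node, so a
 * degenerate single-address key acts as a point query against the tree
 * of non-overlapping ranges.
 */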
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct mmio_rb_range *entry;
	int err;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);
	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie, paging,
				      mem_read, mem_write, &entry->mr_param);
	pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* base falls inside an existing range */
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
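/*
 * Usage sketch, compiled out: how a device model might hook an MMIO
 * window into this code.  The handler prototype and the mem_range
 * fields used below are assumed to match the mem_func_t typedef and
 * struct mem_range declaration in mem.h; the device name, guest
 * physical address and handler behavior are hypothetical.
 */
#if 0
static int
example_mmio_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
		     int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads return zero */

	/* writes are silently dropped */
	return (0);
}

static void
example_register(void)
{
	struct mem_range mr = {
		.name = "example-dev",
		.flags = MEM_F_RW,
		.handler = example_mmio_handler,
		.base = 0xd0000000UL,
		.size = 0x1000,
	};
	int error;

	error = register_mem(&mr);
	assert(error == 0);
}
#endif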
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
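/*
 * Note: init_mem() is expected to run once, before any ranges are
 * registered or any MMIO access is emulated, since the routines above
 * all assume that mmio_rwlock and the two RB trees have already been
 * initialized.
 */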