/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */
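
/*
 * Added note: because insertion rejects overlapping ranges, at most one
 * registered range can contain a given address.  A point lookup therefore
 * uses a degenerate key whose base and limit are both the address being
 * searched for; the comparator reports such a key as "equal" to any range
 * containing it, so RB_FIND returns the containing range if one exists:
 *
 *	key.mr_base = key.mr_end = addr;
 *	entry = RB_FIND(mmio_rb_tree, &mmio_rb_root, &key);
 */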

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;
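
/*
 * Added note: lookups in access_memory() take 'mmio_rwlock' for reading,
 * while register_mem_int() and unregister_mem() take it for writing.
 * unregister_mem() also clears any mmio_hint entries that point at the
 * removed range, so a cached hint can never outlive its range.
 */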

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		    "claims region already claimed for '%s'\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end,
		    new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}

static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}
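
/*
 * Illustrative sketch (not compiled, in the same #if 0 style used above for
 * mmio_rb_dump()): how a device model might register a range served by this
 * file.  The handler's parameter list mirrors the way mem_read()/mem_write()
 * invoke 'mr->handler'; the name 'dummy_handler', the base/size values and
 * the exact mem_func_t prototype (declared in mem.h) are assumptions made
 * for illustration.  MEM_F_IMMUTABLE is set only because this range would
 * never be passed to unregister_mem(), which lets access_memory() drop
 * 'mmio_rwlock' before calling the handler (see the deadlock note above).
 */
#if 0
static int
dummy_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size,
    uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;		/* value returned to the guest */
	/* dir == MEM_F_WRITE: consume *val */
	return (0);
}

static void
dummy_register(void)
{
	struct mem_range mr = {
		.name = "dummy",
		.base = 0xd0000000UL,
		.size = 0x1000,
		.flags = MEM_F_READ | MEM_F_WRITE | MEM_F_IMMUTABLE,
		.handler = dummy_handler,
		.arg1 = NULL,
		.arg2 = 0,
	};

	assert(register_mem(&mr) == 0);
}
#endif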

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp\n",
		    __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
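
/*
 * Added note: only the primary tree is searched below, so a range added with
 * register_mem_fallback() cannot be removed through this function, and an
 * MEM_F_IMMUTABLE range must never be unregistered because its handler may
 * run with 'mmio_rwlock' dropped (see access_memory()).
 */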

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}