/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key is a degenerate range with
 * base == limit, so it compares equal to (and therefore finds) any
 * registered range that contains that address.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

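/*
 * Two trees are kept: 'mmio_rb_root' holds explicitly registered ranges
 * and is searched first; 'mmio_rb_fallback' is consulted only when a
 * lookup in the primary tree misses (see access_memory()).
 */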
RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];
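/*
 * The hint is updated by access_memory() while 'mmio_rwlock' is held and
 * flushed by unregister_mem() when the cached range is removed.
 */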

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}
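
/*
 * Example of the comparator's interval semantics: a registered range
 * [0x1000, 0x1fff] and the degenerate lookup key with
 * mr_base == mr_end == 0x1500 compare equal, so RB_FIND() returns the
 * containing range. Two overlapping ranges also compare equal, which is
 * how RB_INSERT() detects collisions in mmio_rb_add().
 */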

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		       "claims region already claimed for '%s'\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end,
		       new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

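/*
 * mem_read() and mem_write() adapt a registered mem_range handler to the
 * read/write callback signatures expected by vmm_emulate_instruction();
 * 'arg' carries the mem_range itself.
 */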
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}
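
#if 0
/*
 * Illustrative sketch, not compiled in: one way a caller might use
 * read_mem()/write_mem() for a 4-byte register in a registered MMIO
 * range. The guest-physical address and the read-modify-write sequence
 * here are hypothetical.
 */
static void
rw_mem_example(struct vmctx *ctx, int vcpu)
{
	uint64_t val;

	/* read 4 bytes, set bit 0, write the value back */
	if (read_mem(ctx, vcpu, 0xc0001000, &val, 4) == 0) {
		val |= 0x1;
		(void) write_mem(ctx, vcpu, 0xc0001000, val, 4);
	}
}
#endif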

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp",
		     __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
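
#if 0
/*
 * Illustrative sketch, not compiled in: registering a read/write MMIO
 * range. 'example_handler' and the base/size values are hypothetical;
 * the handler signature follows mem_func_t as declared in mem.h.
 */
static int
example_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads as zero */
	return (0);		/* writes are silently dropped */
}

static void
register_example(void)
{
	struct mem_range mr;

	mr.name = "example-device";
	mr.flags = MEM_F_RW;
	mr.handler = example_handler;
	mr.arg1 = NULL;
	mr.arg2 = 0;
	mr.base = 0xd0000000;
	mr.size = 0x1000;
	(void) register_mem(&mr);
}
#endif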

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
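		/*
		 * Note: the first assert below compares the 'name' pointers
		 * for identity, not string contents; callers are expected to
		 * pass the same mem_range that was registered.
		 */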
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush the per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}