xref: /freebsd/sys/vm/memguard.c (revision e3813573bd1f9a921ad17f16efbf3f26676784d8)
/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator, for debugging only,
 * that provides ElectricFence-style memory barrier protection on
 * allocated objects and can be used to detect tamper-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
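
/*
 * A minimal usage sketch (illustrative; the knobs referenced here are
 * defined later in this file).  Reserve KVA for MemGuard at boot, e.g.
 * in loader.conf:
 *
 *	vm.memguard.divisor=10
 *
 * then select a malloc(9) type to monitor at runtime:
 *
 *	sysctl vm.memguard.desc=<ks_shortdesc of the type>
 */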

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If mtp is NULL, it will be initialized in memguard_cmp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
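
/*
 * For example (illustrative only; any loaded malloc(9) type's short
 * description may be used):
 *
 *	sysctl vm.memguard.desc=ifaddr
 */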

static vm_map_t memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private vm_map size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD	0x001
#define	MG_ALLLARGE	0x002
static int memguard_options = MG_GUARD;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page");
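
/*
 * The option bits may be ORed together; e.g. setting
 * "vm.memguard.options=3" (illustrative) selects both MG_GUARD and
 * MG_ALLLARGE, since 0x001 | 0x002 == 0x003.
 */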

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
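
/*
 * Worked example (illustrative): with vm.memguard.frequency=2000,
 * roughly 2000 out of every 100000 (~2%) of otherwise-unmatched
 * allocations are randomly diverted to MemGuard; see the
 * (random() % 100000) test in memguard_cmp() below.
 */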

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, unsigned long km_max)
{
	u_long mem_pgs = cnt.v_page_count;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value is unusable. */
	if ((vm_memguard_divisor == 0) ||
	    ((km_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this, then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of kmem_max.  Limit this to twice the
	 * physical memory to avoid using too much memory as pagetable
	 * pages.
	 */
	memguard_mapsize = km_max / vm_memguard_divisor;
	/* size must be a multiple of PAGE_SIZE */
	memguard_mapsize = round_page(memguard_mapsize);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > km_max)
		return (km_max);
	return (km_size + memguard_mapsize);
}
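
/*
 * Worked example (illustrative, assuming 4 KB pages): on a machine
 * with 1 GB of RAM (mem_pgs == 262144) and the default divisor of 10,
 * memguard_physlimit is ~100 MB.  With km_max == 4 GB, the submap is
 * sized at km_max / 10 (~400 MB of KVA after rounding), which is under
 * the 2 * physical-memory cap of 2 GB, so the caller's map grows by
 * that amount, subject to the km_max ceiling.
 */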

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map)
{
	vm_offset_t base, limit;

	memguard_map = kmem_suballoc(parent_map, &base, &limit,
	    memguard_mapsize, FALSE);
	memguard_map->system_map = 1;
	KASSERT(memguard_mapsize == limit - base,
	    ("Expected %lu, got %lu", (u_long)memguard_mapsize,
	     (u_long)(limit - base)));
	memguard_cursor = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_map->min_offset, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_map->max_offset, "MemGuard KVA end");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	struct vm_page *p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return ((u_long *)&p->pageq.tqe_next);
}
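
/*
 * The field is written once at allocation and read once at free; the
 * pairing, as used by memguard_alloc() and memguard_free() below, is:
 *
 *	*v2sizep(trunc_page(addr)) = req_size;	(store)
 *	req_size = *v2sizep(addr);		(load)
 */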

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	vm_map_lock(memguard_map);
	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (memguard_map->size >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so that we avoid recycling KVA for as
	 * long as possible.  This is not perfect, since we do not know
	 * in what order previous allocations will be freed, but it is
	 * simple and fast, and requires O(1) additional storage if
	 * guard pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		rv = vm_map_findspace(memguard_map, memguard_cursor,
		    size_v, &addr);
		if (rv == KERN_SUCCESS)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == vm_map_min(memguard_map)) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = vm_map_min(memguard_map);
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(memguard_map, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_p;
	*v2sizep(trunc_page(addr)) = req_size;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	vm_map_unlock(memguard_map);
	return ((void *)addr);
}
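
/*
 * Sketch of the expected caller (illustrative; the actual diversion
 * lives in the malloc(9) implementation, not in this file):
 *
 *	if (memguard_cmp(mtp, size))
 *		return (memguard_alloc(size, flags));
 */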

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
}
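
/*
 * Likewise, free(9) is expected to route guarded addresses back here
 * (illustrative sketch, not code from this file):
 *
 *	if (is_memguard_addr(addr)) {
 *		memguard_free(addr);
 *		return;
 *	}
 */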

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to, and the write
	 * subsequently triggers a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	vm_map_lock(memguard_map);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
	(void)vm_map_delete(memguard_map, addr, addr + size);
	vm_map_unlock(memguard_map);
}

int
memguard_cmp(struct malloc_type *mtp, unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}
#if 1
	/*
	 * The safest way to compare is always to compare the memory
	 * type's short description string, but it is also the slowest.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated at the
	 *    same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}