/*
 * Copyright (c) 2005,
 *     Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only.  It
 * provides ElectricFence-style memory barrier protection on objects
 * being allocated, and is used to detect tamper-after-free scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
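
/*
 * Typical setup, sketched from memguard(9); knob names are believed to
 * match this vintage of FreeBSD, but verify against your own tree:
 *
 *	options DEBUG_MEMGUARD		kernel configuration file
 *	vm.memguard.divisor=10		loader tunable; see below
 *	sysctl vm.memguard.desc=devbuf	monitor a malloc(9) type by its
 *					short description (M_DEVBUF here,
 *					chosen only as an example)
 */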

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

/*
 * The maximum number of pages allowed per allocation.  If you're using
 * MemGuard to override very large items (more than MAX_PAGES_PER_ITEM
 * pages in size), you need to increase MAX_PAGES_PER_ITEM.
 */
#define	MAX_PAGES_PER_ITEM	64

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RD, &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");
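
/*
 * Illustrative sizing (numbers are examples only): with a 512 MB kmem_map
 * and vm_memguard_divisor == 10, the MemGuard submap reserves
 * 512 MB / 10, i.e. about 51 MB, of kernel address space.
 */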

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp;
	char desc[128];
	long bytes;
	int error, i;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/*
	 * We can only change the memory type when no memory has been
	 * allocated for it, or when the memory type does not exist yet
	 * (i.e., it will be loaded with a kernel module).
	 */
	bytes = 0;
	mtx_lock(&malloc_mtx);
	mtp = malloc_desc2type(desc);
	if (mtp != NULL) {
		mtip = mtp->ks_handle;
		for (i = 0; i < MAXCPU; i++) {
			mtsp = &mtip->mti_stats[i];
			bytes += mtsp->mts_memalloced;
			bytes -= mtsp->mts_memfreed;
		}
	}
	if (bytes > 0)
		error = EBUSY;
	else {
		/*
		 * If mtp is NULL, it will be initialized in memguard_cmp().
		 */
		vm_memguard_mtype = mtp;
		strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	}
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
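
/*
 * Runtime example (hypothetical malloc type): the monitored type is
 * switched through the handler above, e.g.
 *
 *	sysctl vm.memguard.desc=devbuf
 *
 * and the change is refused with EBUSY while the requested type still
 * has memory outstanding.
 */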

/*
 * Global MemGuard data.
 */
static vm_map_t memguard_map;
static unsigned long memguard_mapsize;
static unsigned long memguard_mapused;
struct memguard_entry {
	STAILQ_ENTRY(memguard_entry) entries;
	void *ptr;
};
static struct memguard_fifo {
	struct memguard_entry *stqh_first;
	struct memguard_entry **stqh_last;
	int index;
} memguard_fifo_pool[MAX_PAGES_PER_ITEM];
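
/*
 * Note on indexing: slot i of memguard_fifo_pool holds freed (and still
 * guarded) items that are exactly i + 1 pages long; the index field lets
 * memguard_free() recover the item size from the page alone.
 */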

/*
 * Local prototypes.
 */
static void memguard_guard(void *addr, int numpgs);
static void memguard_unguard(void *addr, int numpgs);
static struct memguard_fifo *vtomgfifo(vm_offset_t va);
static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo);
static void vclrmgfifo(vm_offset_t va);

/*
 * Local macros.  MemGuard data is global, so replace these with whatever
 * your system uses to protect global data (if the kernel is
 * parallelized).  These exist to ease porting among the BSDs.
 */
#define	MEMGUARD_CRIT_SECTION_DECLARE	static struct mtx memguard_mtx
#define	MEMGUARD_CRIT_SECTION_INIT				\
	mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF)
#define	MEMGUARD_CRIT_SECTION_ENTER	mtx_lock(&memguard_mtx)
#define	MEMGUARD_CRIT_SECTION_EXIT	mtx_unlock(&memguard_mtx)
MEMGUARD_CRIT_SECTION_DECLARE;

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map, unsigned long size)
{
	char *base, *limit;
	int i;

	/*
	 * Round size up to a multiple of PAGE_SIZE; an already-aligned
	 * size gains a full extra page.
	 */
	size /= PAGE_SIZE;
	size++;
	size *= PAGE_SIZE;

	memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base,
	    (vm_offset_t *)&limit, (vm_size_t)size, FALSE);
	memguard_map->system_map = 1;
	memguard_mapsize = size;
	memguard_mapused = 0;

	MEMGUARD_CRIT_SECTION_INIT;
	MEMGUARD_CRIT_SECTION_ENTER;
	for (i = 0; i < MAX_PAGES_PER_ITEM; i++) {
		STAILQ_INIT(&memguard_fifo_pool[i]);
		memguard_fifo_pool[i].index = i;
	}
	MEMGUARD_CRIT_SECTION_EXIT;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: %p\n", base);
	printf("\tMEMGUARD map limit: %p\n", limit);
	printf("\tMEMGUARD map size: %lu (bytes)\n", size);
}
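
/*
 * How initialization is expected to be wired up (a sketch of the caller
 * in the kernel malloc startup path; treat names and placement as
 * illustrative rather than a literal excerpt, though the sizing follows
 * the divisor sysctl description above):
 *
 *	#ifdef DEBUG_MEMGUARD
 *		memguard_init(kmem_map, kmem_size / vm_memguard_divisor);
 *	#endif
 */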

/*
 * Allocate a single object of specified size with specified flags (either
 * M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj;
	struct memguard_entry *e = NULL;
	int numpgs;

	numpgs = size / PAGE_SIZE;
	if ((size % PAGE_SIZE) != 0)
		numpgs++;
	if (numpgs > MAX_PAGES_PER_ITEM)
		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM "
		    "in memguard.c (requested: %d pages)", numpgs);
	if (numpgs == 0)
		return (NULL);

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from it
	 * and grab a new page, even if we have recycled pages in our FIFO.
	 * We do this because we wish to let recycled pages live guarded in
	 * the FIFO for as long as possible, in order to catch even very
	 * late tamper-after-frees.  It means we end up wasting more memory,
	 * but this is only a DEBUGGING allocator, after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e,
			    memguard_entry, entries);
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			memguard_unguard(obj, numpgs);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE * numpgs);
			return (obj);
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: "
			    "memguard_map too small");
		return (NULL);
	}
	memguard_mapused += (PAGE_SIZE * numpgs);
	MEMGUARD_CRIT_SECTION_EXIT;

	obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags);
	if (obj != NULL) {
		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE * numpgs);
	} else {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * numpgs);
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return (obj);
}
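
/*
 * Callers divert allocations into MemGuard based on memguard_cmp(); a
 * minimal sketch of the malloc(9) side (illustrative, not a literal
 * excerpt from kern_malloc.c):
 *
 *	#ifdef DEBUG_MEMGUARD
 *		if (memguard_cmp(mtp))
 *			return (memguard_alloc(size, flags));
 *	#endif
 */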

/*
 * Free specified single object.
 */
void
memguard_free(void *addr)
{
	struct memguard_entry *e;
	struct memguard_fifo *mgfifo;
	int idx;
	int *temp;

	addr = (void *)trunc_page((unsigned long)addr);

	/*
	 * The page should not be guarded by now, so force a write.  The
	 * purpose of this is to increase the likelihood of catching a
	 * double-free, though not necessarily a tamper-after-free: a second
	 * thread freeing the object might not otherwise write to the page
	 * before freeing it, so this forces the write, which in turn
	 * triggers a fault if the page has already been guarded again.
	 */
	temp = (int *)((unsigned long)addr + (PAGE_SIZE/2));	/* in page */
	*temp = 0xd34dc0d3;

	mgfifo = vtomgfifo((vm_offset_t)addr);
	idx = mgfifo->index;
	memguard_guard(addr, idx + 1);
	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
	if (e == NULL) {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * (idx + 1));
		MEMGUARD_CRIT_SECTION_EXIT;
		memguard_unguard(addr, idx + 1);	/* just in case */
		vclrmgfifo((vm_offset_t)addr);
		kmem_free(memguard_map, (vm_offset_t)addr,
		    PAGE_SIZE * (idx + 1));
		return;
	}
	e->ptr = addr;
	MEMGUARD_CRIT_SECTION_ENTER;
	STAILQ_INSERT_TAIL(mgfifo, e, entries);
	MEMGUARD_CRIT_SECTION_EXIT;
}
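
/*
 * The matching diversion on the free path (again an illustrative sketch,
 * not a literal excerpt):
 *
 *	#ifdef DEBUG_MEMGUARD
 *		if (memguard_cmp(mtp)) {
 *			memguard_free(addr);
 *			return;
 *		}
 *	#endif
 */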

int
memguard_cmp(struct malloc_type *mtp)
{

#if 1
	/*
	 * The safest way to compare is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. The memory type was unloaded and a new memory type was
	 *    allocated at the same address.
	 * 2. The memory type was unloaded and loaded again, but allocated
	 *    at a different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

/*
 * Guard the pages containing the specified object (make them read-only
 * so that future writes to them fail).
 */
static void
memguard_guard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to guard page!");
}

/*
 * Unguard the pages containing the specified object (make them read/write
 * to allow full data access).
 */
static void
memguard_unguard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to unguard page!");
}

/*
 * vtomgfifo() converts a virtual address of the first page allocated for
 * an item to a memguard_fifo_pool reference for the corresponding item's
 * size.
 *
 * vsetmgfifo() sets a reference in an underlying page for the specified
 * virtual address to an appropriate memguard_fifo_pool.
 *
 * These routines are very similar to those defined by UMA in uma_int.h.
 * The difference is that these routines store the mgfifo in a page field
 * that is unused while the page is wired, rather than in the object
 * field, which is in use.
 */
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
{
	vm_page_t p;
	struct memguard_fifo *mgfifo;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vtomgfifo!"));
	mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
	return (mgfifo);
}

static void
vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vsetmgfifo!"));
	p->pageq.tqe_next = (vm_page_t)mgfifo;
}

static void
vclrmgfifo(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vclrmgfifo!"));
	p->pageq.tqe_next = NULL;
}