xref: /freebsd/sys/vm/memguard.c (revision c0b9f4fe659b6839541970eb5675e57f4d814969)
/*
 * Copyright (c) 2005,
 *     Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator, intended for debugging use
 * only.  It provides ElectricFence-style memory barrier protection on
 * objects being allocated and is used to detect tamper-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
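
/*
 * A typical setup, per memguard(9) (hedged sketch; consult the man page
 * for the exact knobs on your version): build a kernel with
 * "options DEBUG_MEMGUARD", then select the malloc(9) type to guard via
 * a loader tunable, e.g.:
 *
 *	set vm.memguard.desc=example_type
 *
 * where "example_type" stands in for the short description string of
 * the malloc type being debugged.
 */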

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

/*
 * The maximum number of pages allowed per allocation.  If you're using
 * MemGuard to override very large items (more than MAX_PAGES_PER_ITEM
 * pages in size), you need to increase MAX_PAGES_PER_ITEM.
 */
#define	MAX_PAGES_PER_ITEM	64
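
/*
 * For example, with MAX_PAGES_PER_ITEM at 64 and a typical 4 KB
 * PAGE_SIZE, the largest item MemGuard can service is
 * 64 * 4096 bytes = 256 KB.
 */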

/*
 * Global MemGuard data.
 */
static vm_map_t memguard_map;
static unsigned long memguard_mapsize;
static unsigned long memguard_mapused;
struct memguard_entry {
	STAILQ_ENTRY(memguard_entry) entries;
	void *ptr;
};
static struct memguard_fifo {
	struct memguard_entry *stqh_first;
	struct memguard_entry **stqh_last;
	int index;
} memguard_fifo_pool[MAX_PAGES_PER_ITEM];
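
/*
 * Note that the first two members of struct memguard_fifo intentionally
 * mirror the layout generated by STAILQ_HEAD(, memguard_entry), which is
 * what allows the STAILQ_*() macros below to operate on it; the extra
 * "index" member records the size class (number of pages - 1) that each
 * FIFO serves.
 */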

/*
 * Local prototypes.
 */
static void memguard_guard(void *addr, int numpgs);
static void memguard_unguard(void *addr, int numpgs);
static struct memguard_fifo *vtomgfifo(vm_offset_t va);
static void vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo);
static void vclrmgfifo(vm_offset_t va);

/*
 * Local macros.  MemGuard data is global, so when porting this code to
 * another BSD, replace these with whatever that system uses to protect
 * global data (assuming its kernel is parallelized).
 */
#define	MEMGUARD_CRIT_SECTION_DECLARE	static struct mtx memguard_mtx
#define	MEMGUARD_CRIT_SECTION_INIT				\
	mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF)
#define	MEMGUARD_CRIT_SECTION_ENTER	mtx_lock(&memguard_mtx)
#define	MEMGUARD_CRIT_SECTION_EXIT	mtx_unlock(&memguard_mtx)
MEMGUARD_CRIT_SECTION_DECLARE;
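
/*
 * A hedged porting sketch only (not used on FreeBSD): on a BSD that
 * still relies on spl(9)-style interrupt protection rather than
 * mutexes, the same four macros could be expressed roughly as:
 *
 *	#define MEMGUARD_CRIT_SECTION_DECLARE	static int memguard_spl
 *	#define MEMGUARD_CRIT_SECTION_INIT	do { } while (0)
 *	#define MEMGUARD_CRIT_SECTION_ENTER	memguard_spl = splvm()
 *	#define MEMGUARD_CRIT_SECTION_EXIT	splx(memguard_spl)
 */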

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map, unsigned long size)
{
	char *base, *limit;
	int i;

	/* Round size up to a multiple of PAGE_SIZE (always adds a page). */
	size /= PAGE_SIZE;
	size++;
	size *= PAGE_SIZE;
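	/*
	 * E.g., with 4 KB pages a requested size of 8192 becomes 12288
	 * here; note that an already page-aligned size still gains one
	 * extra page.
	 */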

	memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base,
	    (vm_offset_t *)&limit, (vm_size_t)size);
	memguard_map->system_map = 1;
	memguard_mapsize = size;
	memguard_mapused = 0;

	MEMGUARD_CRIT_SECTION_INIT;
	MEMGUARD_CRIT_SECTION_ENTER;
	for (i = 0; i < MAX_PAGES_PER_ITEM; i++) {
		STAILQ_INIT(&memguard_fifo_pool[i]);
		memguard_fifo_pool[i].index = i;
	}
	MEMGUARD_CRIT_SECTION_EXIT;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: %p\n", base);
	printf("\tMEMGUARD map limit: %p\n", limit);
	printf("\tMEMGUARD map size: %lu (Bytes)\n", size);
}
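
/*
 * A hedged sketch of how a kernel wires this in at boot: under
 * "options DEBUG_MEMGUARD", the malloc(9) startup code carves the guard
 * map out of the kernel memory map along the lines of:
 *
 *	memguard_init(kmem_map, vm_kmem_size / some_divisor);
 *
 * where "some_divisor" stands in for whatever fraction of kernel VA the
 * administrator dedicates to guarding; see memguard(9) for the actual
 * tunables.
 */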

/*
 * Allocate a single object of specified size with specified flags (either
 * M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long size, int flags)
{
	void *obj;
	struct memguard_entry *e = NULL;
	int numpgs;

	numpgs = size / PAGE_SIZE;
	if ((size % PAGE_SIZE) != 0)
		numpgs++;
	if (numpgs > MAX_PAGES_PER_ITEM)
		panic("MEMGUARD: You must increase MAX_PAGES_PER_ITEM " \
		    "in memguard.c (requested: %d pages)", numpgs);
	if (numpgs == 0)
		return NULL;

	/*
	 * If we haven't exhausted the memguard_map yet, allocate from
	 * it and grab a new page, even if we have recycled pages in our
	 * FIFO.  This is because we wish to allow recycled pages to live
	 * guarded in the FIFO for as long as possible in order to catch
	 * even very late tamper-after-frees.  Although this wastes more
	 * memory, this is only a DEBUGGING allocator after all.
	 */
	MEMGUARD_CRIT_SECTION_ENTER;
	if (memguard_mapused >= memguard_mapsize) {
		e = STAILQ_FIRST(&memguard_fifo_pool[numpgs - 1]);
		if (e != NULL) {
			STAILQ_REMOVE(&memguard_fifo_pool[numpgs - 1], e,
			    memguard_entry, entries);
			MEMGUARD_CRIT_SECTION_EXIT;
			obj = e->ptr;
			free(e, M_TEMP);
			memguard_unguard(obj, numpgs);
			if (flags & M_ZERO)
				bzero(obj, PAGE_SIZE * numpgs);
			return obj;
		}
		MEMGUARD_CRIT_SECTION_EXIT;
		if (flags & M_WAITOK)
			panic("MEMGUARD: Failed with M_WAITOK: " \
			    "memguard_map too small");
		return NULL;
	}
	memguard_mapused += (PAGE_SIZE * numpgs);
	MEMGUARD_CRIT_SECTION_EXIT;

	obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE * numpgs, flags);
	if (obj != NULL) {
		vsetmgfifo((vm_offset_t)obj, &memguard_fifo_pool[numpgs - 1]);
		if (flags & M_ZERO)
			bzero(obj, PAGE_SIZE * numpgs);
	} else {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * numpgs);
		MEMGUARD_CRIT_SECTION_EXIT;
	}
	return obj;
}
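
/*
 * A hedged sketch of a caller: a malloc(9) implementation redirecting a
 * guarded type to MemGuard might do roughly the following on its
 * allocation path ("is_guarded_type" is a hypothetical predicate, not
 * part of this file):
 *
 *	if (is_guarded_type(mtp))
 *		return (memguard_alloc(size, flags));
 *
 * with the matching free path calling memguard_free(addr) instead of
 * returning the memory to the regular allocator.
 */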

/*
 * Free specified single object.
 */
void
memguard_free(void *addr)
{
	struct memguard_entry *e;
	struct memguard_fifo *mgfifo;
	int idx;
	int *temp;

	addr = (void *)trunc_page((unsigned long)addr);

	/*
	 * The page should not be guarded at this point, so force a write
	 * to it.  The purpose of this is to increase the likelihood of
	 * catching a double-free: the second caller to free the object
	 * might not otherwise write to the page before freeing it, so we
	 * force the write here, and on a double-free that write faults
	 * because the page has already been guarded read-only.
	 */
	temp = (int *)((unsigned long)addr + (PAGE_SIZE/2));	/* within page */
	*temp = 0xd34dc0d3;

	mgfifo = vtomgfifo((vm_offset_t)addr);
	idx = mgfifo->index;
	memguard_guard(addr, idx + 1);
	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
	if (e == NULL) {
		MEMGUARD_CRIT_SECTION_ENTER;
		memguard_mapused -= (PAGE_SIZE * (idx + 1));
		MEMGUARD_CRIT_SECTION_EXIT;
		memguard_unguard(addr, idx + 1);	/* just in case */
		vclrmgfifo((vm_offset_t)addr);
		kmem_free(memguard_map, (vm_offset_t)addr,
		    PAGE_SIZE * (idx + 1));
		return;
	}
	e->ptr = addr;
	MEMGUARD_CRIT_SECTION_ENTER;
	STAILQ_INSERT_TAIL(mgfifo, e, entries);
	MEMGUARD_CRIT_SECTION_EXIT;
}
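
/*
 * To illustrate the detection sequence above (a walkthrough, not code):
 * after memguard_free(p) runs once, the pages backing p are read-only.
 * A buggy second memguard_free(p) then faults on the
 * "*temp = 0xd34dc0d3" write, and a tamper-after-free faults as soon as
 * stale code writes through a dangling pointer into the guarded pages.
 */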

/*
 * Guard the page(s) containing the specified object (make them read-only
 * so that future writes to them fault).
 */
static void
memguard_guard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to guard page!");
}

/*
 * Unguard the page(s) containing the specified object (restore read-write
 * access to allow full data access).
 */
static void
memguard_unguard(void *addr, int numpgs)
{
	void *a = (void *)trunc_page((unsigned long)addr);
	if (vm_map_protect(memguard_map, (vm_offset_t)a,
	    (vm_offset_t)((unsigned long)a + (PAGE_SIZE * numpgs)),
	    VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS)
		panic("MEMGUARD: Unable to unguard page!");
}

/*
 * vtomgfifo() converts a virtual address of the first page allocated for
 * an item to a memguard_fifo_pool reference for the corresponding item's
 * size.
 *
 * vsetmgfifo() sets a reference in an underlying page for the specified
 * virtual address to an appropriate memguard_fifo_pool.
 *
 * These routines are very similar to those defined by UMA in uma_int.h.
 * The difference is that these routines store the mgfifo in one of the
 * page's fields that is unused while the page is wired (the pageq
 * linkage), rather than in the object field, which is in use.
 */
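
/*
 * Taken together these form a tiny per-page lookup table keyed by
 * virtual address; paraphrasing the callers elsewhere in this file:
 *
 *	vsetmgfifo(va, &memguard_fifo_pool[numpgs - 1]);   (at allocation)
 *	mgfifo = vtomgfifo(va);                            (at free)
 *	numpgs = mgfifo->index + 1;                        (size recovered)
 */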
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
{
	vm_page_t p;
	struct memguard_fifo *mgfifo;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vtomgfifo!"));
	mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
	return mgfifo;
}

static void
vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vsetmgfifo!"));
	p->pageq.tqe_next = (vm_page_t)mgfifo;
}

static void
vclrmgfifo(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vclrmgfifo!"));
	p->pageq.tqe_next = NULL;
}
316