/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
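
/*
 * Typical setup, as a sketch only -- memguard(9) is authoritative and
 * the values below are purely illustrative:
 *
 *	options DEBUG_MEMGUARD		# kernel config option
 *	vm.memguard.divisor=10		# loader tunable: fraction of
 *					# kmem/RAM reserved for MemGuard
 *	sysctl vm.memguard.desc=ifnet	# at runtime, pick one malloc(9)
 *					# type by its short description
 */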

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If mtp is NULL, it will be initialized in memguard_cmp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_map_t memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private vm_map size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD	0x001
#define	MG_ALLLARGE	0x002
static int memguard_options = MG_GUARD;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page");
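
/*
 * Example (illustrative): guard the selected type with guard pages
 * and, in addition, every allocation of a page or more:
 *
 *	sysctl vm.memguard.options=3	# MG_GUARD | MG_ALLLARGE
 */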

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
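
/*
 * Example (illustrative): guard roughly 1% of eligible allocations at
 * random; memguard_cmp() below compares (random() % 100000) against
 * this value:
 *
 *	sysctl vm.memguard.frequency=1000
 */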

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, unsigned long km_max)
{
	u_long mem_pgs = cnt.v_page_count;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((km_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of kmem_max.  Limit this to twice the
	 * physical memory to avoid using too much memory as pagetable
	 * pages.
	 */
	memguard_mapsize = km_max / vm_memguard_divisor;
	/* size must be multiple of PAGE_SIZE */
	memguard_mapsize = round_page(memguard_mapsize);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > km_max)
		return (km_max);
	return (km_size + memguard_mapsize);
}
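
/*
 * Worked example for memguard_fudge() (illustrative numbers): with the
 * default divisor of 10, km_max = 512MB and 256MB of RAM in 4kB pages,
 * the submap gets round_page(512MB / 10) ~= 51.2MB of KVA and
 * memguard_physlimit caps page consumption at ~25.6MB; the clamp to
 * twice physical memory does not kick in.
 */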

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map)
{
	vm_offset_t base, limit;

	memguard_map = kmem_suballoc(parent_map, &base, &limit,
	    memguard_mapsize, FALSE);
	memguard_map->system_map = 1;
	KASSERT(memguard_mapsize == limit - base,
	    ("Expected %lu, got %lu", (u_long)memguard_mapsize,
	    (u_long)(limit - base)));
	memguard_cursor = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_map->min_offset, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_map->max_offset, "MemGuard KVA end");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired rather than the object field, which is used.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	struct vm_page *p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return ((u_long *)&p->pageq.tqe_next);
}

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	vm_map_lock(memguard_map);
	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (memguard_map->size >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we don't recycle KVA as long as
	 * possible.
	 * It's not perfect, since we don't know in what
	 * order previous allocations will be free'd, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		rv = vm_map_findspace(memguard_map, memguard_cursor,
		    size_v, &addr);
		if (rv == KERN_SUCCESS)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == vm_map_min(memguard_map)) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = vm_map_min(memguard_map);
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(memguard_map, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_p;
	*v2sizep(trunc_page(addr)) = req_size;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	vm_map_unlock(memguard_map);
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
}
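
/*
 * Sketch (illustrative, not the verbatim hook) of how the free path is
 * expected to use the check above so that MemGuard allocations are
 * routed back here instead of to UMA:
 *
 *	if (is_memguard_addr(addr)) {
 *		memguard_free(addr);
 *		return;
 *	}
 */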

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to and,
	 * subsequently, trigger a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	vm_map_lock(memguard_map);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
	(void)vm_map_delete(memguard_map, addr, addr + size);
	vm_map_unlock(memguard_map);
}

int
memguard_cmp(struct malloc_type *mtp, unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}
#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated
	 *    at the same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at
	 *    a different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}
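
/*
 * Sketch (illustrative, not the verbatim hook) of the allocation-side
 * integration: under "options DEBUG_MEMGUARD", malloc(9) is expected
 * to consult memguard_cmp() and divert matching requests here:
 *
 *	if (memguard_cmp(mtp, size)) {
 *		va = memguard_alloc(size, flags);
 *		if (va != NULL)
 *			return (va);
 *	}
 */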