/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
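 *
 * A minimal usage sketch (memguard(9) has the full details): build a
 * kernel with "options DEBUG_MEMGUARD", then point MemGuard at a
 * malloc(9) type by its short description, e.g. "temp" for M_TEMP:
 *
 *	# sysctl vm.memguard.desc=temp
 *
 * The same string may be set at boot through the vm.memguard.desc
 * loader tunable (see the TUNABLE_STR() declaration below).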
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kernel_arena should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
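 * Allocations of a malloc(9) type whose ks_shortdesc matches this string
 * are diverted to MemGuard; the comparison itself is done in
 * memguard_cmp_mtp() below.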
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
        char desc[sizeof(vm_memguard_desc)];
        int error;

        strlcpy(desc, vm_memguard_desc, sizeof(desc));
        error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
        if (error != 0 || req->newptr == NULL)
                return (error);

        mtx_lock(&malloc_mtx);
        /* If mtp is NULL, it will be initialized in memguard_cmp() */
        vm_memguard_mtype = malloc_desc2type(desc);
        strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
        mtx_unlock(&malloc_mtx);
        return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static int
memguard_sysctl_mapused(SYSCTL_HANDLER_ARGS)
{
        vmem_size_t size;

        size = vmem_size(memguard_arena, VMEM_ALLOC);
        return (sysctl_handle_long(oidp, &size, sizeof(size), req));
}

static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD_AROUND         0x001
#define MG_GUARD_ALLLARGE       0x002
#define MG_GUARD_NOFREE         0x004
static int memguard_options = MG_GUARD_AROUND;
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kernel_arena.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
        u_long mem_pgs, parent_size;

        vm_memguard_divisor = 10;
        /* CTLFLAG_RDTUN doesn't work during the early boot process. */
        TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

        parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
            PAGE_SIZE;
        /* Pick a conservative value if the provided value is unreasonable. */
        if ((vm_memguard_divisor <= 0) ||
            ((parent_size / vm_memguard_divisor) == 0))
                vm_memguard_divisor = 10;
        /*
         * Limit consumption of physical pages to
         * 1/vm_memguard_divisor of system memory.  If the KVA is
         * smaller than this then the KVA limit comes into play first.
         * This prevents memguard's page promotions from completely
         * using up memory, since most malloc(9) calls are sub-page.
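         *
         * For example, with 4 GiB of physical memory (1048576 4 KiB
         * pages) and the default divisor of 10, the limit computed
         * below is 104857 pages, roughly 410 MiB.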
         */
        mem_pgs = vm_cnt.v_page_count;
        memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
        /*
         * We want as much KVA as we can take safely.  Use at most our
         * allotted fraction of the parent map's size.  Limit this to
         * twice the physical memory to avoid using too much memory as
         * pagetable pages (size must be multiple of PAGE_SIZE).
         */
        memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
        if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
                memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
        if (km_size + memguard_mapsize > parent_size)
                memguard_mapsize = 0;
        return (km_size + memguard_mapsize);
}

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single contiguous chunk of kernel address space that is managed
 * by a vmem arena.
 */
void
memguard_init(vmem_t *parent)
{
        vm_offset_t base;

        vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
        vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
            PAGE_SIZE, 0, M_WAITOK);
        memguard_base = base;

        printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
        printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
        printf("\tMEMGUARD map size: %jd KBytes\n",
            (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
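 * memguard_init() is called from kmeminit(), before the dynamic sysctl
 * machinery is available, so the oids below are registered from a
 * SYSINIT instead.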
 */
static void
memguard_sysinit(void)
{
        struct sysctl_oid_list *parent;

        parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
        SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart",
            CTLFLAG_RD, &memguard_base,
            "MemGuard KVA base");
        SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit",
            CTLFLAG_RD, &memguard_mapsize,
            "MemGuard KVA size");
        SYSCTL_ADD_PROC(NULL, parent, OID_AUTO, "mapused",
            CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_ULONG, NULL, 0,
            memguard_sysctl_mapused, "LU", "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired rather than the object field, which is used.
 */
static u_long *
v2sizep(vm_offset_t va)
{
        vm_paddr_t pa;
        struct vm_page *p;

        pa = pmap_kextract(va);
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
        vm_paddr_t pa;
        struct vm_page *p;

        pa = pmap_kextract(va);
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
        KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.v);
}

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
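 *
 * The object is backed by freshly allocated wired pages.  When the
 * MG_GUARD_AROUND option is set, an unmapped guard page is left on each
 * side of the object, so any access past either end faults immediately.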
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
        vm_offset_t addr, origaddr;
        u_long size_p, size_v;
        int do_guard, error, rv;

        size_p = round_page(req_size);
        if (size_p == 0)
                return (NULL);

        /*
         * To ensure there are holes on both sides of the allocation,
         * request 2 extra pages of KVA.  Save the value of memguard_options
         * so that we use a consistent value throughout this function.
         */
        size_v = size_p;
        do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
        if (do_guard)
                size_v += 2 * PAGE_SIZE;

        /*
         * When we pass our memory limit, reject sub-page allocations.
         * Page-size and larger allocations will use the same amount
         * of physical memory whether we allocate or hand off to
         * malloc_large(), so keep those.
         */
        if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
            req_size < PAGE_SIZE) {
                addr = (vm_offset_t)NULL;
                memguard_fail_pgs++;
                goto out;
        }

        /*
         * Attempt to avoid address reuse for as long as possible, to increase
         * the likelihood of catching a use-after-free.
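         * M_NEXTFIT makes vmem(9) cycle through the arena rather than
         * immediately reusing the lowest free address, so recently
         * freed regions stay unmapped (and thus fault on access) for
         * as long as possible.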
         */
        error = vmem_alloc(memguard_arena, size_v, M_NEXTFIT | M_NOWAIT,
            &origaddr);
        if (error != 0) {
                memguard_fail_kva++;
                addr = (vm_offset_t)NULL;
                goto out;
        }
        addr = origaddr;
        if (do_guard)
                addr += PAGE_SIZE;
        rv = kmem_back(kernel_object, addr, size_p, flags);
        if (rv != KERN_SUCCESS) {
                vmem_xfree(memguard_arena, origaddr, size_v);
                memguard_fail_pgs++;
                addr = (vm_offset_t)NULL;
                goto out;
        }
        *v2sizep(trunc_page(addr)) = req_size;
        *v2sizev(trunc_page(addr)) = size_v;
        memguard_succ++;
        if (req_size < PAGE_SIZE) {
                memguard_wasted += (PAGE_SIZE - req_size);
                if (do_guard) {
                        /*
                         * Align the request to 16 bytes, and return
                         * an address near the end of the page, to
                         * better detect array overrun.
                         */
                        req_size = roundup2(req_size, 16);
                        addr += (PAGE_SIZE - req_size);
                }
        }
out:
        return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
        vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

        return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
        vm_offset_t addr;
        u_long req_size, size, sizev;
        char *temp;
        int i;

        addr = trunc_page((uintptr_t)ptr);
        req_size = *v2sizep(addr);
        sizev = *v2sizev(addr);
        size = round_page(req_size);

        /*
         * Page should not be guarded right now, so force a write.
         * The purpose of this is to increase the likelihood of
         * catching a double-free, but not necessarily a
         * tamper-after-free (the second thread freeing might not
         * write before freeing, so this forces it to and,
         * subsequently, trigger a fault).
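         *
         * If the pages backing this object were already unmapped by an
         * earlier free, the stores below hit unmapped kernel VA and
         * panic, pinpointing the double-free.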
         */
        temp = ptr;
        for (i = 0; i < size; i += PAGE_SIZE)
                temp[i] = 'M';

        /*
         * This requires carnal knowledge of the implementation of
         * kmem_free(), but since we've already replaced kmem_malloc()
         * above, it's not really any worse.  We want to use the
         * vm_map lock to serialize updates to memguard_wasted, since
         * we had the lock at increment.
         */
        kmem_unback(kernel_object, addr, size);
        if (sizev > size)
                addr -= PAGE_SIZE;
        vmem_xfree(memguard_arena, addr, sizev);
        if (req_size < PAGE_SIZE)
                memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
        void *newaddr;
        u_long old_size;

        /*
         * Allocate the new block.  Force the allocation to be guarded
         * as the original may have been guarded through random
         * chance, and that should be preserved.
         */
        if ((newaddr = memguard_alloc(size, flags)) == NULL)
                return (NULL);

        /* Copy over original contents. */
        old_size = *v2sizep(trunc_page((uintptr_t)addr));
        bcopy(addr, newaddr, min(size, old_size));
        memguard_free(addr);
        return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

        if (size < memguard_minsize) {
                memguard_minsize_reject++;
                return (0);
        }
        if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
                return (1);
        if (memguard_frequency > 0 &&
            (random() % 100000) < memguard_frequency) {
                memguard_frequency_hits++;
                return (1);
        }

        return (0);
}

int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

        if (memguard_cmp(size))
                return (1);

#if 1
        /*
         * The safest way of comparison is to always compare the short
         * description string of the memory type, but it is also the
         * slowest way.
         */
        return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
        /*
         * If we compare pointers, there are two possible problems:
         * 1. Memory type was unloaded and new memory type was allocated at the
         *    same address.
         * 2. Memory type was unloaded and loaded again, but allocated at a
         *    different address.
         */
        if (vm_memguard_mtype != NULL)
                return (mtp == vm_memguard_mtype);
        if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
                vm_memguard_mtype = mtp;
                return (1);
        }
        return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

        if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
            zone->uz_flags & UMA_ZONE_NOFREE)
                return (0);

        if (memguard_cmp(zone->uz_size))
                return (1);

        /*
         * The safest way of comparison is to always compare the zone
         * name, but it is also the slowest way.
         */
        return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}