/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

/*
 * Extract malloc(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
	struct malloc_type_stream_header *mtshp;
	struct malloc_type_header *mthp;
	struct malloc_type_stats *mtsp;
	struct memory_type *mtp;
	int count, error, hint_dontsearch, i, j, maxcpus;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(list);

	/*
	 * Query the number of CPUs and the number of malloc types so that
	 * we can guess an initial buffer size.  We loop until we succeed or
	 * really fail.  Note that the value of maxcpus we query using sysctl
	 * is not the version we use when processing the real data -- that is
	 * read from the header.
	 */
retry:
	size = sizeof(maxcpus);
	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
		error = errno;
		perror("kern.smp.maxcpus");
		errno = error;
		return (-1);
	}
	if (size != sizeof(maxcpus)) {
		fprintf(stderr, "kern.smp.maxcpus: wrong size\n");
		errno = EINVAL;
		return (-1);
	}

	if (maxcpus > MEMSTAT_MAXCPU) {
		fprintf(stderr, "kern.smp.maxcpus: too many CPUs\n");
		errno = EINVAL;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		error = errno;
		perror("kern.malloc_count");
		errno = error;
		return (-1);
	}
	if (size != sizeof(count)) {
		fprintf(stderr, "kern.malloc_count: wrong size\n");
		errno = EINVAL;
		return (-1);
	}

	size = sizeof(*mtshp) + count * (sizeof(*mthp) +
	    sizeof(*mtsp) * maxcpus);

	buffer = malloc(size);
	if (buffer == NULL) {
		error = errno;
		perror("malloc");
		errno = error;
		return (-1);
	}

	if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return, we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		error = errno;
		free(buffer);
		perror("kern.malloc_stats");
		errno = error;
		return (-1);
	}

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*mtshp)) {
		fprintf(stderr, "sysctl_malloc: invalid malloc header\n");
		free(buffer);
		errno = EINVAL;
		return (-1);
	}
	p = buffer;
	mtshp = (struct malloc_type_stream_header *)p;
	p += sizeof(*mtshp);

	if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
		fprintf(stderr, "sysctl_malloc: unknown malloc version\n");
		free(buffer);
		errno = EINVAL;
		return (-1);
	}

	if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
		fprintf(stderr, "sysctl_malloc: too many CPUs\n");
		free(buffer);
		errno = EINVAL;
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = mtshp->mtsh_maxcpus;
	count = mtshp->mtsh_count;
	for (i = 0; i < count; i++) {
		mthp = (struct malloc_type_header *)p;
		p += sizeof(*mthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
			/*
			 * Reset the statistics on a reused node.
			 */
			if (mtp != NULL)
				memstat_mt_reset_stats(mtp);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		if (mtp == NULL) {
			memstat_mtl_free(list);
			free(buffer);
			errno = ENOMEM;
			perror("malloc");
			errno = ENOMEM;
			return (-1);
		}

		/*
		 * Reset the statistics on a current node.
		 */
		memstat_mt_reset_stats(mtp);

		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
			p += sizeof(*mtsp);

			/*
			 * Summarize raw statistics across CPUs into
			 * coalesced statistics.
			 */
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			/*
			 * Copies of per-CPU statistics.
			 */
			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		/*
		 * Derived cross-CPU statistics.
		 */
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(buffer);

	return (0);
}
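
/*
 * Example: a minimal, hypothetical consumer of memstat_sysctl_malloc().
 * The accessor names used below (memstat_mtl_alloc(), memstat_mtl_first(),
 * memstat_mtl_next(), memstat_get_name(), memstat_get_count(),
 * memstat_get_bytes() and memstat_mtl_free()) follow the memstat(3)
 * interface; this is an illustrative sketch, not part of the library, and
 * would be compiled separately and linked with -lmemstat.
 *
 *	#include <sys/types.h>
 *
 *	#include <err.h>
 *	#include <memstat.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct memory_type_list *mtlp;
 *		struct memory_type *mtp;
 *
 *		mtlp = memstat_mtl_alloc();
 *		if (mtlp == NULL)
 *			err(1, "memstat_mtl_alloc");
 *		if (memstat_sysctl_malloc(mtlp, 0) < 0)
 *			err(1, "memstat_sysctl_malloc");
 *		for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *		    mtp = memstat_mtl_next(mtp))
 *			printf("%s: %ju allocations outstanding, %ju bytes\n",
 *			    memstat_get_name(mtp),
 *			    (uintmax_t)memstat_get_count(mtp),
 *			    (uintmax_t)memstat_get_bytes(mtp));
 *		memstat_mtl_free(mtlp);
 *		return (0);
 *	}
 */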