/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

static struct nlist namelist[] = {
#define X_KMEMSTATISTICS        0
        { .n_name = "_kmemstatistics" },
#define X_MP_MAXCPUS            1
        { .n_name = "_mp_maxcpus" },
        { .n_name = "" },
};

/*
 * Extract malloc(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the amount of work for a list that starts empty, we keep around
 * a hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) overall, as each
 * incoming entry is searched for before it is added.
 */
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
        struct malloc_type_stream_header *mtshp;
        struct malloc_type_header *mthp;
        struct malloc_type_stats *mtsp;
        struct memory_type *mtp;
        int count, hint_dontsearch, i, j, maxcpus;
        char *buffer, *p;
        size_t size;

        hint_dontsearch = LIST_EMPTY(&list->mtl_list);

        /*
         * Query the number of CPUs and the number of malloc types so that
         * we can guess an initial buffer size.  We loop until we succeed or
         * fail definitively.  Note that the maxcpus value queried via
         * sysctl is not the one used when processing the real data -- that
         * is read from the stream header.
         */
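        /*
         * The kern.malloc_stats stream is laid out as a single
         * malloc_type_stream_header followed by mtsh_count records, each of
         * which consists of one malloc_type_header and mtsh_maxcpus
         * malloc_type_stats structures.  The size estimate below mirrors
         * this layout.
         */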
retry:
        size = sizeof(maxcpus);
        if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }
        if (size != sizeof(maxcpus)) {
                list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }

        if (maxcpus > MEMSTAT_MAXCPU) {
                list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
                return (-1);
        }

        size = sizeof(count);
        if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_VERSION;
                return (-1);
        }
        if (size != sizeof(count)) {
                list->mtl_error = MEMSTAT_ERROR_DATAERROR;
                return (-1);
        }

        size = sizeof(*mtshp) + count * (sizeof(*mthp) +
            sizeof(*mtsp) * maxcpus);

        buffer = malloc(size);
        if (buffer == NULL) {
                list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                return (-1);
        }

        if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
                /*
                 * XXXRW: ENOMEM is an ambiguous return; perhaps we should
                 * bound the number of retries.
                 */
                if (errno == ENOMEM) {
                        free(buffer);
                        goto retry;
                }
                if (errno == EACCES || errno == EPERM)
                        list->mtl_error = MEMSTAT_ERROR_PERMISSION;
                else
                        list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }

        if (size == 0) {
                free(buffer);
                return (0);
        }

        if (size < sizeof(*mtshp)) {
                list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }
        p = buffer;
        mtshp = (struct malloc_type_stream_header *)p;
        p += sizeof(*mtshp);

        if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
                list->mtl_error = MEMSTAT_ERROR_VERSION;
                free(buffer);
                return (-1);
        }

        if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
                list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
                free(buffer);
                return (-1);
        }

        /*
         * For the remainder of this function, we are quite trusting about
         * the layout of structures and sizes, since we've determined we
         * have a matching version and an acceptable CPU count.
         */
        maxcpus = mtshp->mtsh_maxcpus;
        count = mtshp->mtsh_count;
        for (i = 0; i < count; i++) {
                mthp = (struct malloc_type_header *)p;
                p += sizeof(*mthp);

                if (hint_dontsearch == 0) {
                        mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
                            mthp->mth_name);
                } else
                        mtp = NULL;
                if (mtp == NULL)
                        mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
                            mthp->mth_name);
                if (mtp == NULL) {
                        _memstat_mtl_empty(list);
                        free(buffer);
                        list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                        return (-1);
                }

                /*
                 * Reset the statistics on the current node.
                 */
                _memstat_mt_reset_stats(mtp);

                for (j = 0; j < maxcpus; j++) {
                        mtsp = (struct malloc_type_stats *)p;
                        p += sizeof(*mtsp);

                        /*
                         * Summarize raw statistics across CPUs into
                         * coalesced statistics.
                         */
                        mtp->mt_memalloced += mtsp->mts_memalloced;
                        mtp->mt_memfreed += mtsp->mts_memfreed;
                        mtp->mt_numallocs += mtsp->mts_numallocs;
                        mtp->mt_numfrees += mtsp->mts_numfrees;
                        mtp->mt_sizemask |= mtsp->mts_size;
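
                        /*
                         * mts_size is a bitmask of the allocation sizes
                         * observed for this type rather than a counter,
                         * which is why it coalesces across CPUs with |=
                         * rather than += above.
                         */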

                        /*
                         * Copies of per-CPU statistics.
                         */
                        mtp->mt_percpu_alloc[j].mtp_memalloced =
                            mtsp->mts_memalloced;
                        mtp->mt_percpu_alloc[j].mtp_memfreed =
                            mtsp->mts_memfreed;
                        mtp->mt_percpu_alloc[j].mtp_numallocs =
                            mtsp->mts_numallocs;
                        mtp->mt_percpu_alloc[j].mtp_numfrees =
                            mtsp->mts_numfrees;
                        mtp->mt_percpu_alloc[j].mtp_sizemask =
                            mtsp->mts_size;
                }

                /*
                 * Derived cross-CPU statistics.
                 */
                mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
                mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
        }

        free(buffer);

        return (0);
}

static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
        ssize_t ret;

        ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
            size);
        if (ret < 0)
                return (MEMSTAT_ERROR_KVM);
        if ((size_t)ret != size)
                return (MEMSTAT_ERROR_KVM_SHORTREAD);
        return (0);
}

static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
        ssize_t ret;
        int i;

        for (i = 0; i < buflen; i++) {
                ret = kvm_read(kvm, __DECONST(unsigned long, kvm_pointer) +
                    i, &(buffer[i]), sizeof(char));
                if (ret < 0)
                        return (MEMSTAT_ERROR_KVM);
                if ((size_t)ret != sizeof(char))
                        return (MEMSTAT_ERROR_KVM_SHORTREAD);
                if (buffer[i] == '\0')
                        return (0);
        }
        /* Truncate if no nul terminator was found within buflen bytes. */
        buffer[i - 1] = '\0';
        return (0);
}

static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
        ssize_t ret;

        ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
        if (ret < 0)
                return (MEMSTAT_ERROR_KVM);
        if ((size_t)ret != size)
                return (MEMSTAT_ERROR_KVM_SHORTREAD);
        return (0);
}

int
memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
{
        struct memory_type *mtp;
        void *kmemstatistics;
        int hint_dontsearch, j, mp_maxcpus, ret;
        char name[MEMTYPE_MAXNAME];
        struct malloc_type_stats mts[MEMSTAT_MAXCPU], *mtsp;
        struct malloc_type_internal *mtip;
        struct malloc_type type, *typep;
        kvm_t *kvm;

        kvm = (kvm_t *)kvm_handle;

        hint_dontsearch = LIST_EMPTY(&list->mtl_list);

        if (kvm_nlist(kvm, namelist) != 0) {
                list->mtl_error = MEMSTAT_ERROR_KVM;
                return (-1);
        }

        if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
            namelist[X_KMEMSTATISTICS].n_value == 0) {
                list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
                return (-1);
        }

        ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
            sizeof(mp_maxcpus), 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }

        if (mp_maxcpus > MEMSTAT_MAXCPU) {
                list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
                return (-1);
        }

        ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
            sizeof(kmemstatistics), 0);
        if (ret != 0) {
                list->mtl_error = ret;
                return (-1);
        }

        for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
                ret = kread(kvm, typep, &type, sizeof(type), 0);
                if (ret != 0) {
                        _memstat_mtl_empty(list);
                        list->mtl_error = ret;
                        return (-1);
                }
                ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
                    MEMTYPE_MAXNAME);
                if (ret != 0) {
                        _memstat_mtl_empty(list);
                        list->mtl_error = ret;
                        return (-1);
                }

                /*
                 * Since our compile-time value for MAXCPU may differ from
                 * the kernel's, we populate our own array.
                 */
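                /*
                 * mti_stats is an array embedded in struct
                 * malloc_type_internal, so evaluating mtip->mti_stats below
                 * merely computes an address from the kernel pointer held
                 * in type.ks_handle; no user-space dereference of kernel
                 * memory occurs, and the data itself is fetched via
                 * kread().
                 */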
                mtip = type.ks_handle;
                ret = kread(kvm, mtip->mti_stats, mts, mp_maxcpus *
                    sizeof(struct malloc_type_stats), 0);
                if (ret != 0) {
                        _memstat_mtl_empty(list);
                        list->mtl_error = ret;
                        return (-1);
                }

                if (hint_dontsearch == 0) {
                        mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
                            name);
                } else
                        mtp = NULL;
                if (mtp == NULL)
                        mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
                            name);
                if (mtp == NULL) {
                        _memstat_mtl_empty(list);
                        list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
                        return (-1);
                }

                /*
                 * This logic is replicated from kern_malloc.c, and should
                 * be kept in sync.
                 */
                _memstat_mt_reset_stats(mtp);
                for (j = 0; j < mp_maxcpus; j++) {
                        mtsp = &mts[j];
                        mtp->mt_memalloced += mtsp->mts_memalloced;
                        mtp->mt_memfreed += mtsp->mts_memfreed;
                        mtp->mt_numallocs += mtsp->mts_numallocs;
                        mtp->mt_numfrees += mtsp->mts_numfrees;
                        mtp->mt_sizemask |= mtsp->mts_size;

                        mtp->mt_percpu_alloc[j].mtp_memalloced =
                            mtsp->mts_memalloced;
                        mtp->mt_percpu_alloc[j].mtp_memfreed =
                            mtsp->mts_memfreed;
                        mtp->mt_percpu_alloc[j].mtp_numallocs =
                            mtsp->mts_numallocs;
                        mtp->mt_percpu_alloc[j].mtp_numfrees =
                            mtsp->mts_numfrees;
                        mtp->mt_percpu_alloc[j].mtp_sizemask =
                            mtsp->mts_size;
                }

                mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
                mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
        }

        return (0);
}
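
/*
 * Example usage: a minimal sketch of a consumer of memstat_sysctl_malloc()
 * built on the public memstat(3) accessors declared in memstat.h.  This is
 * illustrative only and not part of the library; error handling is
 * abbreviated.
 *
 *      struct memory_type_list *list;
 *      struct memory_type *mtp;
 *
 *      list = memstat_mtl_alloc();
 *      if (list == NULL)
 *              err(1, "memstat_mtl_alloc");
 *      if (memstat_sysctl_malloc(list, 0) < 0)
 *              errx(1, "memstat_sysctl_malloc: error %d",
 *                  memstat_mtl_geterror(list));
 *      for (mtp = memstat_mtl_first(list); mtp != NULL;
 *          mtp = memstat_mtl_next(mtp))
 *              printf("%s: %ju bytes in use\n", memstat_get_name(mtp),
 *                  (uintmax_t)memstat_get_bytes(mtp));
 *      memstat_mtl_free(list);
 */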