/*-
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

/*
 * Translate a libmemstat(3) error code into a human-readable string.
 */
const char *
memstat_strerror(int error)
{

	switch (error) {
	case MEMSTAT_ERROR_NOMEMORY:
		return ("Cannot allocate memory");
	case MEMSTAT_ERROR_VERSION:
		return ("Version mismatch");
	case MEMSTAT_ERROR_PERMISSION:
		return ("Permission denied");
	case MEMSTAT_ERROR_TOOMANYCPUS:
		return ("Too many CPUs");
	case MEMSTAT_ERROR_DATAERROR:
		return ("Data format error");
	case MEMSTAT_ERROR_KVM:
		return ("KVM error");
	case MEMSTAT_ERROR_KVM_NOSYMBOL:
		return ("KVM unable to find symbol");
	case MEMSTAT_ERROR_KVM_SHORTREAD:
		return ("KVM short read");
	case MEMSTAT_ERROR_UNDEFINED:
	default:
		return ("Unknown error");
	}
}

/*
 * Allocate and initialise an empty list of memory types.  Returns NULL if
 * insufficient memory is available.
 */
struct memory_type_list *
memstat_mtl_alloc(void)
{
	struct memory_type_list *mtlp;

	mtlp = malloc(sizeof(*mtlp));
	if (mtlp == NULL)
		return (NULL);

	LIST_INIT(&mtlp->mtl_list);
	mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
	return (mtlp);
}

/*
 * Iterators over a memory type list: return the first entry, and the entry
 * following a given memory type.
 */
struct memory_type *
memstat_mtl_first(struct memory_type_list *list)
{

	return (LIST_FIRST(&list->mtl_list));
}

struct memory_type *
memstat_mtl_next(struct memory_type *mtp)
{

	return (LIST_NEXT(mtp, mt_list));
}

/*
 * Release all memory_type entries on a list without freeing the list
 * itself.
 *
 * libmemstat(3) internal function.
 */
void
_memstat_mtl_empty(struct memory_type_list *list)
{
	struct memory_type *mtp;

	while ((mtp = LIST_FIRST(&list->mtl_list))) {
		LIST_REMOVE(mtp, mt_list);
		free(mtp);
	}
}

/*
 * Free a memory type list and all entries on it.
 */
void
memstat_mtl_free(struct memory_type_list *list)
{

	_memstat_mtl_empty(list);
	free(list);
}

/*
 * Return the error state associated with a memory type list.
 */
int
memstat_mtl_geterror(struct memory_type_list *list)
{

	return (list->mtl_error);
}
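
/*
 * Example (an illustrative sketch, not part of the library): the intended
 * consumer life cycle for a memory_type_list.  It assumes one of the query
 * routines declared in memstat.h, here memstat_sysctl_uma(), which is
 * implemented elsewhere in libmemstat(3) and is expected to return -1 on
 * failure after recording the cause in the list error state.
 *
 *	struct memory_type_list *mtlp;
 *	struct memory_type *mtp;
 *
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_sysctl_uma(mtlp, 0) < 0)
 *		errx(1, "memstat_sysctl_uma: %s",
 *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
 *	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *	    mtp = memstat_mtl_next(mtp))
 *		printf("%s\n", memstat_get_name(mtp));
 *	memstat_mtl_free(mtlp);
 */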

/*
 * Look for an existing memory_type entry in a memory_type list, based on the
 * allocator and name of the type.  If not found, return NULL.  No errno or
 * memstat error.
 */
struct memory_type *
memstat_mtl_find(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
		if ((mtp->mt_allocator == allocator ||
		    allocator == ALLOCATOR_ANY) &&
		    strcmp(mtp->mt_name, name) == 0)
			return (mtp);
	}
	return (NULL);
}

/*
 * Allocate a new memory_type with the specified allocator type and name,
 * then insert into the list.  The structure will be zeroed.
 *
 * libmemstat(3) internal function.
 */
struct memory_type *
_memstat_mt_allocate(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	mtp = malloc(sizeof(*mtp));
	if (mtp == NULL)
		return (NULL);

	bzero(mtp, sizeof(*mtp));

	mtp->mt_allocator = allocator;
	strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
	LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
	return (mtp);
}

/*
 * Reset any libmemstat(3)-owned statistics in a memory_type record so that
 * it can be reused without incremental addition problems.  Caller-owned
 * memory is left "as-is", and must be updated by the caller if desired.
 *
 * libmemstat(3) internal function.
 */
void
_memstat_mt_reset_stats(struct memory_type *mtp)
{
	int i;

	mtp->mt_countlimit = 0;
	mtp->mt_byteslimit = 0;
	mtp->mt_sizemask = 0;
	mtp->mt_size = 0;

	mtp->mt_memalloced = 0;
	mtp->mt_memfreed = 0;
	mtp->mt_numallocs = 0;
	mtp->mt_numfrees = 0;
	mtp->mt_bytes = 0;
	mtp->mt_count = 0;
	mtp->mt_free = 0;
	mtp->mt_failures = 0;
	mtp->mt_sleeps = 0;

	mtp->mt_zonefree = 0;
	mtp->mt_kegfree = 0;

	for (i = 0; i < MEMSTAT_MAXCPU; i++) {
		mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
		mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
		mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
		mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
		mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
		mtp->mt_percpu_cache[i].mtp_free = 0;
	}
}
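
/*
 * Example (an illustrative sketch): how a backend might combine the routines
 * above when refreshing a list from kernel data, so that existing entries
 * (and their caller-owned fields) survive a refresh.  The zone_name variable
 * is a hypothetical placeholder; the real query routines live in files such
 * as memstat_uma.c and memstat_malloc.c.
 *
 *	mtp = memstat_mtl_find(list, ALLOCATOR_UMA, zone_name);
 *	if (mtp == NULL)
 *		mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, zone_name);
 *	if (mtp == NULL) {
 *		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
 *		return (-1);
 *	}
 *	_memstat_mt_reset_stats(mtp);
 *	... copy statistics from the kernel into *mtp ...
 */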

/*
 * Accessor methods for struct memory_type.  Avoids encoding the structure
 * ABI into the application.
 */
const char *
memstat_get_name(const struct memory_type *mtp)
{

	return (mtp->mt_name);
}

int
memstat_get_allocator(const struct memory_type *mtp)
{

	return (mtp->mt_allocator);
}

uint64_t
memstat_get_countlimit(const struct memory_type *mtp)
{

	return (mtp->mt_countlimit);
}

uint64_t
memstat_get_byteslimit(const struct memory_type *mtp)
{

	return (mtp->mt_byteslimit);
}

uint64_t
memstat_get_sizemask(const struct memory_type *mtp)
{

	return (mtp->mt_sizemask);
}

uint64_t
memstat_get_size(const struct memory_type *mtp)
{

	return (mtp->mt_size);
}

uint64_t
memstat_get_memalloced(const struct memory_type *mtp)
{

	return (mtp->mt_memalloced);
}

uint64_t
memstat_get_memfreed(const struct memory_type *mtp)
{

	return (mtp->mt_memfreed);
}

uint64_t
memstat_get_numallocs(const struct memory_type *mtp)
{

	return (mtp->mt_numallocs);
}

uint64_t
memstat_get_numfrees(const struct memory_type *mtp)
{

	return (mtp->mt_numfrees);
}

uint64_t
memstat_get_bytes(const struct memory_type *mtp)
{

	return (mtp->mt_bytes);
}

uint64_t
memstat_get_count(const struct memory_type *mtp)
{

	return (mtp->mt_count);
}

uint64_t
memstat_get_free(const struct memory_type *mtp)
{

	return (mtp->mt_free);
}

uint64_t
memstat_get_failures(const struct memory_type *mtp)
{

	return (mtp->mt_failures);
}

uint64_t
memstat_get_sleeps(const struct memory_type *mtp)
{

	return (mtp->mt_sleeps);
}

void *
memstat_get_caller_pointer(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_pointer[index]);
}

void
memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
{

	mtp->mt_caller_pointer[index] = value;
}

uint64_t
memstat_get_caller_uint64(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_uint64[index]);
}

void
memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
{

	mtp->mt_caller_uint64[index] = value;
}
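
/*
 * Example (an illustrative sketch): the caller-owned slots let an
 * application attach its own state to a memory_type without knowledge of
 * the structure layout, and _memstat_mt_reset_stats() deliberately leaves
 * them alone across refreshes.  Here slot 0 (an arbitrary choice) records
 * the previous sample of the allocation counter so a delta can be printed.
 *
 *	uint64_t now, prev;
 *
 *	now = memstat_get_numallocs(mtp);
 *	prev = memstat_get_caller_uint64(mtp, 0);
 *	printf("%s: %ju allocations since last sample\n",
 *	    memstat_get_name(mtp), (uintmax_t)(now - prev));
 *	memstat_set_caller_uint64(mtp, 0, now);
 */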

uint64_t
memstat_get_zonefree(const struct memory_type *mtp)
{

	return (mtp->mt_zonefree);
}

uint64_t
memstat_get_kegfree(const struct memory_type *mtp)
{

	return (mtp->mt_kegfree);
}

uint64_t
memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
}

uint64_t
memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
}

uint64_t
memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
}

uint64_t
memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
}

uint64_t
memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
}

void *
memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
}

void
memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
    int index, void *value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
}

uint64_t
memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
}

void
memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
    uint64_t value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
}

uint64_t
memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_cache[cpu].mtp_free);
}
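
/*
 * Example (an illustrative sketch): combining the zone, keg, and per-CPU
 * cache accessors to total the free items cached for a UMA zone.  It assumes
 * MEMSTAT_MAXCPU (the compile-time bound used for the per-CPU arrays above)
 * is visible to the consumer; in practice the aggregate returned by
 * memstat_get_free() is usually what an application wants.
 *
 *	uint64_t cached;
 *	int i;
 *
 *	cached = memstat_get_zonefree(mtp) + memstat_get_kegfree(mtp);
 *	for (i = 0; i < MEMSTAT_MAXCPU; i++)
 *		cached += memstat_get_percpu_free(mtp, i);
 *	printf("%s: %ju items cached\n", memstat_get_name(mtp),
 *	    (uintmax_t)cached);
 */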