/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

/*
 * Translate a libmemstat(3) error number into a human-readable string.
 */
const char *
memstat_strerror(int error)
{

	switch (error) {
	case MEMSTAT_ERROR_NOMEMORY:
		return ("Cannot allocate memory");
	case MEMSTAT_ERROR_VERSION:
		return ("Version mismatch");
	case MEMSTAT_ERROR_PERMISSION:
		return ("Permission denied");
	case MEMSTAT_ERROR_DATAERROR:
		return ("Data format error");
	case MEMSTAT_ERROR_KVM:
		return ("KVM error");
	case MEMSTAT_ERROR_KVM_NOSYMBOL:
		return ("KVM unable to find symbol");
	case MEMSTAT_ERROR_KVM_SHORTREAD:
		return ("KVM short read");
	case MEMSTAT_ERROR_UNDEFINED:
	default:
		return ("Unknown error");
	}
}

/*
 * Allocate an empty memory_type list to be filled in by a query routine.
 */
struct memory_type_list *
memstat_mtl_alloc(void)
{
	struct memory_type_list *mtlp;

	mtlp = malloc(sizeof(*mtlp));
	if (mtlp == NULL)
		return (NULL);

	LIST_INIT(&mtlp->mtl_list);
	mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
	return (mtlp);
}

/*
 * Iterator routines: return the first and next entries in a memory_type
 * list.
 */
struct memory_type *
memstat_mtl_first(struct memory_type_list *list)
{

	return (LIST_FIRST(&list->mtl_list));
}

struct memory_type *
memstat_mtl_next(struct memory_type *mtp)
{

	return (LIST_NEXT(mtp, mt_list));
}

/*
 * Release all memory_type records in a list without freeing the list
 * itself.
 *
 * libmemstat(3) internal function.
 */
void
_memstat_mtl_empty(struct memory_type_list *list)
{
	struct memory_type *mtp;

	while ((mtp = LIST_FIRST(&list->mtl_list))) {
		free(mtp->mt_percpu_alloc);
		free(mtp->mt_percpu_cache);
		LIST_REMOVE(mtp, mt_list);
		free(mtp);
	}
}

/*
 * Empty a memory_type list and free the list itself.
 */
void
memstat_mtl_free(struct memory_type_list *list)
{

	_memstat_mtl_empty(list);
	free(list);
}

/*
 * Return the error state recorded by the most recent query on the list.
 */
int
memstat_mtl_geterror(struct memory_type_list *list)
{

	return (list->mtl_error);
}

/*
 * Look for an existing memory_type entry in a memory_type list, based on the
 * allocator and name of the type.  If not found, return NULL; neither errno
 * nor the list's error state is modified.
 */
struct memory_type *
memstat_mtl_find(struct memory_type_list *list, int allocator,
    const char *name)
{
	struct memory_type *mtp;

	LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
		if ((mtp->mt_allocator == allocator ||
		    allocator == ALLOCATOR_ANY) &&
		    strcmp(mtp->mt_name, name) == 0)
			return (mtp);
	}
	return (NULL);
}
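
/*
 * Example (sketch, not compiled here): an application that has populated a
 * list "mtlp", for instance with memstat_sysctl_malloc(3), can look up a
 * single type by name instead of iterating.  The "devbuf" name below is
 * illustrative; any malloc(9) or uma(9) type name known to the kernel may
 * be used, and ALLOCATOR_ANY matches either allocator.
 *
 *	struct memory_type *mtp;
 *
 *	mtp = memstat_mtl_find(mtlp, ALLOCATOR_MALLOC, "devbuf");
 *	if (mtp != NULL)
 *		printf("devbuf: %ju bytes in use\n",
 *		    (uintmax_t)memstat_get_bytes(mtp));
 */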

/*
 * Allocate a new memory_type with the specified allocator type and name,
 * then insert it into the list.  The structure will be zeroed.
 *
 * libmemstat(3) internal function.
 */
struct memory_type *
_memstat_mt_allocate(struct memory_type_list *list, int allocator,
    const char *name, int maxcpus)
{
	struct memory_type *mtp;

	mtp = malloc(sizeof(*mtp));
	if (mtp == NULL)
		return (NULL);

	bzero(mtp, sizeof(*mtp));

	mtp->mt_allocator = allocator;
	mtp->mt_percpu_alloc = malloc(sizeof(struct mt_percpu_alloc_s) *
	    maxcpus);
	mtp->mt_percpu_cache = malloc(sizeof(struct mt_percpu_cache_s) *
	    maxcpus);
	/*
	 * Failing to check these allocations would leave NULL pointers that
	 * _memstat_mt_reset_stats() later dereferences.
	 */
	if (mtp->mt_percpu_alloc == NULL || mtp->mt_percpu_cache == NULL) {
		free(mtp->mt_percpu_alloc);
		free(mtp->mt_percpu_cache);
		free(mtp);
		return (NULL);
	}
	strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
	LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
	return (mtp);
}
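
/*
 * Sketch of the find-or-allocate pattern the per-allocator readers (the
 * sysctl and kvm back ends) are expected to follow; "name" and "maxcpus"
 * stand in for values the reader has already parsed:
 *
 *	mtp = memstat_mtl_find(list, ALLOCATOR_UMA, name);
 *	if (mtp == NULL)
 *		mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA, name,
 *		    maxcpus);
 *	if (mtp == NULL) {
 *		_memstat_mtl_empty(list);
 *		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
 *		return (-1);
 *	}
 */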

/*
 * Reset any libmemstat(3)-owned statistics in a memory_type record so that
 * it can be reused across queries without accumulating stale values.
 * Caller-owned memory is left "as-is", and must be updated by the caller if
 * desired.
 *
 * libmemstat(3) internal function.
 */
void
_memstat_mt_reset_stats(struct memory_type *mtp, int maxcpus)
{
	int i;

	mtp->mt_countlimit = 0;
	mtp->mt_byteslimit = 0;
	mtp->mt_sizemask = 0;
	mtp->mt_size = 0;

	mtp->mt_memalloced = 0;
	mtp->mt_memfreed = 0;
	mtp->mt_numallocs = 0;
	mtp->mt_numfrees = 0;
	mtp->mt_bytes = 0;
	mtp->mt_count = 0;
	mtp->mt_free = 0;
	mtp->mt_failures = 0;
	mtp->mt_sleeps = 0;

	mtp->mt_zonefree = 0;
	mtp->mt_kegfree = 0;

	for (i = 0; i < maxcpus; i++) {
		mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
		mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
		mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
		mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
		mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
		mtp->mt_percpu_cache[i].mtp_free = 0;
	}
}
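
/*
 * Continuing the sketch above: once a record has been found or newly
 * allocated, a reader resets the library-owned counters and accumulates
 * fresh values, typically summing per-CPU data into the totals.
 * "percpu_allocs" is a hypothetical per-CPU array parsed by the reader;
 * the real back ends fill in many more fields.
 *
 *	_memstat_mt_reset_stats(mtp, maxcpus);
 *	for (i = 0; i < maxcpus; i++) {
 *		mtp->mt_percpu_alloc[i].mtp_numallocs = percpu_allocs[i];
 *		mtp->mt_numallocs += percpu_allocs[i];
 *	}
 */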

/*
 * Accessor methods for struct memory_type.  These avoid encoding the
 * structure's ABI into the application, so the layout may change without
 * breaking consumers.
 */
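
/*
 * Typical application usage (sketch): populate a list with one of the query
 * routines, walk it with the mtl iterators, and read fields only through
 * these accessors.  memstat_sysctl_uma(3) is used here as one example of a
 * query routine; %ju/uintmax_t assumes <stdint.h> on the application side.
 *
 *	struct memory_type_list *mtlp;
 *	struct memory_type *mtp;
 *
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_sysctl_uma(mtlp, 0) < 0)
 *		errx(1, "memstat_sysctl_uma: %s",
 *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
 *	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *	    mtp = memstat_mtl_next(mtp))
 *		printf("%s: %ju in use, %ju free\n", memstat_get_name(mtp),
 *		    (uintmax_t)memstat_get_count(mtp),
 *		    (uintmax_t)memstat_get_free(mtp));
 *	memstat_mtl_free(mtlp);
 */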
const char *
memstat_get_name(const struct memory_type *mtp)
{

	return (mtp->mt_name);
}

int
memstat_get_allocator(const struct memory_type *mtp)
{

	return (mtp->mt_allocator);
}

uint64_t
memstat_get_countlimit(const struct memory_type *mtp)
{

	return (mtp->mt_countlimit);
}

uint64_t
memstat_get_byteslimit(const struct memory_type *mtp)
{

	return (mtp->mt_byteslimit);
}

uint64_t
memstat_get_sizemask(const struct memory_type *mtp)
{

	return (mtp->mt_sizemask);
}

uint64_t
memstat_get_size(const struct memory_type *mtp)
{

	return (mtp->mt_size);
}

uint64_t
memstat_get_rsize(const struct memory_type *mtp)
{

	return (mtp->mt_rsize);
}

uint64_t
memstat_get_memalloced(const struct memory_type *mtp)
{

	return (mtp->mt_memalloced);
}

uint64_t
memstat_get_memfreed(const struct memory_type *mtp)
{

	return (mtp->mt_memfreed);
}

uint64_t
memstat_get_numallocs(const struct memory_type *mtp)
{

	return (mtp->mt_numallocs);
}

uint64_t
memstat_get_numfrees(const struct memory_type *mtp)
{

	return (mtp->mt_numfrees);
}

uint64_t
memstat_get_bytes(const struct memory_type *mtp)
{

	return (mtp->mt_bytes);
}

uint64_t
memstat_get_count(const struct memory_type *mtp)
{

	return (mtp->mt_count);
}

uint64_t
memstat_get_free(const struct memory_type *mtp)
{

	return (mtp->mt_free);
}

uint64_t
memstat_get_failures(const struct memory_type *mtp)
{

	return (mtp->mt_failures);
}

uint64_t
memstat_get_sleeps(const struct memory_type *mtp)
{

	return (mtp->mt_sleeps);
}

uint64_t
memstat_get_xdomain(const struct memory_type *mtp)
{

	return (mtp->mt_xdomain);
}

/*
 * Caller-owned storage: an application may stash pointers or 64-bit values
 * in a memory_type.  These slots are left untouched by
 * _memstat_mt_reset_stats(), so they persist across re-queries of the list.
 */
void *
memstat_get_caller_pointer(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_pointer[index]);
}

void
memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
{

	mtp->mt_caller_pointer[index] = value;
}

uint64_t
memstat_get_caller_uint64(const struct memory_type *mtp, int index)
{

	return (mtp->mt_caller_uint64[index]);
}

void
memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
{

	mtp->mt_caller_uint64[index] = value;
}

uint64_t
memstat_get_zonefree(const struct memory_type *mtp)
{

	return (mtp->mt_zonefree);
}

uint64_t
memstat_get_kegfree(const struct memory_type *mtp)
{

	return (mtp->mt_kegfree);
}

uint64_t
memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
}

uint64_t
memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
}

uint64_t
memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
}

uint64_t
memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
}

uint64_t
memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
}

void *
memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
}

void
memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
    int index, void *value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
}

uint64_t
memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
    int index)
{

	return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
}

void
memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
    uint64_t value)
{

	mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
}

uint64_t
memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
{

	return (mtp->mt_percpu_cache[cpu].mtp_free);
}
449