xref: /linux/tools/perf/builtin-kmem.c (revision 32786fdc9506aeba98278c1844d4bfb766863832)
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/config.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>
#include <locale.h>
#include <regex.h>

static int	kmem_slab;
static int	kmem_page;

static long	kmem_page_size;
static enum {
	KMEM_SLAB,
	KMEM_PAGE,
} kmem_default = KMEM_SLAB;  /* for backward compatibility */

struct alloc_stat;
typedef int (*sort_fn_t)(void *, void *);

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static bool			raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u64	last_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;
/* filters for controlling the start and stop time of the analysis */
static struct perf_time_interval ptime;
static const char *time_str;

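/*
 * Slab allocations are tracked in an rbtree keyed by the returned
 * pointer; a re-allocation of the same address accumulates into the
 * existing node.
 */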
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	data->last_alloc = bytes_alloc;

	return 0;
}

static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

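/*
 * A free that happens on a different CPU than the allocation counts as
 * a "ping-pong": the object bounced between CPU caches.
 */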
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	total_freed += s_alloc->last_alloc;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES  6
#define MAX_PAGE_ORDER     11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
	struct rb_node	node;
	u64		page;
	u64		callsite;
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;
	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;

struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;

static int funcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

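/*
 * bsearch() comparator: the key comes in with start == end == ip, so a
 * match means the ip falls inside a known allocator function's range.
 */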
static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int build_alloc_func_list(void)
{
	int ret;
	struct map *kernel_map;
	struct symbol *sym;
	struct rb_node *node;
	struct alloc_func *func;
	struct machine *machine = &kmem_session->machines.host;
	regex_t alloc_func_regex;
	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
	if (ret) {
		char err[BUFSIZ];

		regerror(ret, &alloc_func_regex, err, sizeof(err));
		pr_err("Invalid regex: %s\n%s", pattern, err);
		return -EINVAL;
	}

	kernel_map = machine__kernel_map(machine);
	if (map__load(kernel_map) < 0) {
		pr_err("cannot load kernel map\n");
		ret = -ENOENT;
		goto out;
	}

	map__for_each_symbol(kernel_map, sym, node) {
		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
			continue;

		func = realloc(alloc_func_list,
			       (nr_alloc_funcs + 1) * sizeof(*func));
		if (func == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		pr_debug("alloc func: %s\n", sym->name);
		func[nr_alloc_funcs].start = sym->start;
		func[nr_alloc_funcs].end   = sym->end;
		func[nr_alloc_funcs].name  = sym->name;

		alloc_func_list = func;
		nr_alloc_funcs++;
	}

	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);

	ret = 0;
out:
	regfree(&alloc_func_regex);
	return ret;
}

/*
 * Find the first non-memory-allocation function from the callchain.
 * The allocation functions are listed in 'alloc_func_list'.
 */
static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;

	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);

	callchain_cursor_commit(&callchain_cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(&callchain_cursor);
		if (node == NULL)
			break;

		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found the callsite: first function outside the allocators */
			if (node->map)
				addr = map__unmap_ip(node->map, node->ip);
			else
				addr = node->ip;

			return addr;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(&callchain_cursor);
	}

out:
	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
	return sample->ip;
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);

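/*
 * Page statistics live in three trees: page_live_tree tracks pages that
 * are currently allocated (keyed by page/pfn), page_alloc_tree keeps
 * cumulative per-page stats, and page_caller_tree keeps per-callsite
 * stats.  The latter two are keyed by the *_sort_input dimensions.
 */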
static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_live_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = data->page - pstat->page;
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_live_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, false);
}

static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, true);
}

static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_alloc_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, false);
}

static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, true);
}

static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_caller_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_caller_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->callsite = pstat->callsite;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_caller_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, false);
}

static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, true);
}

static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}

struct gfp_flag {
	unsigned int flags;
	char *compact_str;
	char *human_readable;
};

static struct gfp_flag *gfps;
static int nr_gfps;

static int gfpcmp(const void *a, const void *b)
{
	const struct gfp_flag *fa = a;
	const struct gfp_flag *fb = b;

	return fa->flags - fb->flags;
}

/* see include/trace/events/mmflags.h */
static const struct {
	const char *original;
	const char *compact;
} gfp_compact_table[] = {
	{ "GFP_TRANSHUGE",		"THP" },
	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
	{ "GFP_HIGHUSER",		"HU" },
	{ "GFP_USER",			"U" },
	{ "GFP_TEMPORARY",		"TMP" },
	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
	{ "GFP_KERNEL",			"K" },
	{ "GFP_NOFS",			"NF" },
	{ "GFP_ATOMIC",			"A" },
	{ "GFP_NOIO",			"NI" },
	{ "GFP_NOWAIT",			"NW" },
	{ "GFP_DMA",			"D" },
	{ "__GFP_HIGHMEM",		"HM" },
	{ "GFP_DMA32",			"D32" },
	{ "__GFP_HIGH",			"H" },
	{ "__GFP_ATOMIC",		"_A" },
	{ "__GFP_IO",			"I" },
	{ "__GFP_FS",			"F" },
	{ "__GFP_COLD",			"CO" },
	{ "__GFP_NOWARN",		"NWR" },
	{ "__GFP_REPEAT",		"R" },
	{ "__GFP_NOFAIL",		"NF" },
	{ "__GFP_NORETRY",		"NR" },
	{ "__GFP_COMP",			"C" },
	{ "__GFP_ZERO",			"Z" },
	{ "__GFP_NOMEMALLOC",		"NMA" },
	{ "__GFP_MEMALLOC",		"MA" },
	{ "__GFP_HARDWALL",		"HW" },
	{ "__GFP_THISNODE",		"TN" },
	{ "__GFP_RECLAIMABLE",		"RC" },
	{ "__GFP_MOVABLE",		"M" },
	{ "__GFP_ACCOUNT",		"AC" },
	{ "__GFP_NOTRACK",		"NT" },
	{ "__GFP_WRITE",		"WR" },
	{ "__GFP_RECLAIM",		"R" },
	{ "__GFP_DIRECT_RECLAIM",	"DR" },
	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
	{ "__GFP_OTHER_NODE",		"ON" },
};

static size_t max_gfp_len;

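/*
 * Turn e.g. "GFP_KERNEL|__GFP_ZERO" into the compact "K|Z" so the GFP
 * column stays narrow; max_gfp_len tracks the widest compacted string.
 */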
static char *compact_gfp_flags(char *gfp_flags)
{
	char *orig_flags = strdup(gfp_flags);
	char *new_flags = NULL;
	char *str, *pos = NULL;
	size_t len = 0;

	if (orig_flags == NULL)
		return NULL;

	str = strtok_r(orig_flags, "|", &pos);
	while (str) {
		size_t i;
		char *new;
		const char *cpt;

		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
			if (strcmp(gfp_compact_table[i].original, str))
				continue;

			cpt = gfp_compact_table[i].compact;
			new = realloc(new_flags, len + strlen(cpt) + 2);
			if (new == NULL) {
				free(new_flags);
				free(orig_flags);
				return NULL;
			}

			new_flags = new;

			if (!len) {
				strcpy(new_flags, cpt);
			} else {
				strcat(new_flags, "|");
				strcat(new_flags, cpt);
				len++;
			}

			len += strlen(cpt);
		}

		str = strtok_r(NULL, "|", &pos);
	}

	if (max_gfp_len < len)
		max_gfp_len = len;

	free(orig_flags);
	return new_flags;
}

static char *compact_gfp_string(unsigned long gfp_flags)
{
	struct gfp_flag key = {
		.flags = gfp_flags,
	};
	struct gfp_flag *gfp;

	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
	if (gfp)
		return gfp->compact_str;

	return NULL;
}

static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
			   unsigned int gfp_flags)
{
	struct pevent_record record = {
		.cpu = sample->cpu,
		.data = sample->raw_data,
		.size = sample->raw_size,
	};
	struct trace_seq seq;
	char *str, *pos = NULL;
	int ret = 0;

	if (nr_gfps) {
		struct gfp_flag key = {
			.flags = gfp_flags,
		};

		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
			return 0;
	}

	trace_seq_init(&seq);
	pevent_event_info(&seq, evsel->tp_format, &record);

	str = strtok_r(seq.buffer, " ", &pos);
	while (str) {
		if (!strncmp(str, "gfp_flags=", 10)) {
			struct gfp_flag *new;

			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
			if (new == NULL) {
				ret = -ENOMEM;
				break;
			}

			gfps = new;
			new += nr_gfps++;

			new->flags = gfp_flags;
			new->human_readable = strdup(str + 10);
			new->compact_str = compact_gfp_flags(str + 10);
			if (!new->human_readable || !new->compact_str) {
				ret = -ENOMEM;
				break;
			}

			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
		}

		str = strtok_r(NULL, " ", &pos);
	}

	trace_seq_destroy(&seq);
	return ret;
}

static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
						       "migratetype");
	u64 bytes = kmem_page_size << order;
	u64 callsite;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
		return -1;

	callsite = find_callsite(evsel, sample);

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at free event.
	 */
	this.page = page;
	pstat = page_stat__findnew_page(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;
	pstat->callsite = callsite;

	if (!live_page) {
		pstat = page_stat__findnew_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_alloc++;
		pstat->alloc_bytes += bytes;
		pstat->callsite = callsite;
	}

	this.callsite = callsite;
	pstat = page_stat__findnew_caller(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}

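/*
 * A free event carries only the page/pfn and order; the gfp flags,
 * migrate type and callsite are recovered from the live-tree entry
 * created by the matching alloc event.
 */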
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	this.page = page;
	pstat = page_stat__find_page(&this);
	if (pstat == NULL) {
		pr_debug2("free of unmatched page %"PRIx64" (order: %d): no alloc event seen\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;
	this.callsite = pstat->callsite;

	rb_erase(&pstat->node, &page_live_tree);
	free(pstat);

	if (live_page) {
		order_stats[this.order][this.migrate_type]--;
	} else {
		pstat = page_stat__find_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_free++;
		pstat->free_bytes += bytes;
	}

	pstat = page_stat__find_caller(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	if (live_page) {
		pstat->nr_alloc--;
		pstat->alloc_bytes -= bytes;

		if (pstat->nr_alloc == 0) {
			rb_erase(&pstat->node, &page_caller_tree);
			free(pstat);
		}
	}

	return 0;
}

static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
	/* skip sample based on time? */
	if (perf_time__skip_sample(&ptime, sample->time))
		return true;

	return false;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (perf_kmem__skip_sample(sample)) {
		thread__put(thread);
		return 0;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.ordered_events	 = true,
};

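/* internal fragmentation: percentage of allocated bytes that were not requested */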
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

	printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};

static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_alloc_sorted);
	struct machine *machine = &session->machines.host;
	const char *format;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
	       gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_function(machine, data->callsite, &map);
		if (sym && sym->name)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_caller_sorted);
	struct machine *machine = &session->machines.host;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_function(machine, data->callsite, &map);
		if (sym && sym->name)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void print_gfp_flags(void)
{
	int i;

	printf("#\n");
	printf("# GFP flags\n");
	printf("# ---------\n");
	for (i = 0; i < nr_gfps; i++) {
		printf("# %08x: %*s: %s\n", gfps[i].flags,
		       (int) max_gfp_len, gfps[i].compact_str,
		       gfps[i].human_readable);
	}
}

static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes freed:     %'lu\n", total_freed);
	if (total_allocated > total_freed) {
		printf("Net total bytes allocated: %'lu\n",
		       total_allocated - total_freed);
	}
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
	int o, m;
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf("  %'12d", order_stats[o][m]);
			else
				printf("  %12c", '.');
		}
		printf("\n");
	}
}

static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
	if (caller_flag || alloc_flag)
		print_gfp_flags();
	if (caller_flag)
		__print_page_caller_result(session, caller_lines);
	if (alloc_flag)
		__print_page_alloc_result(session, alloc_lines);
	print_page_summary();
}

static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}

static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);

static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_slab_insert(root_sorted, data, sort_list);
	}
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct page_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct page_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct page_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct page_stat, node);
		sort_page_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &slab_alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &slab_caller_sort);
	}
	if (kmem_page) {
		if (live_page)
			__sort_page_result(&page_live_tree, &page_alloc_sorted,
					   &page_alloc_sort);
		else
			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
					   &page_alloc_sort);

		__sort_page_result(&page_caller_tree, &page_caller_sorted,
				   &page_caller_sort);
	}
}

static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct perf_evsel *evsel;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
		    perf_evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}

/* slab sort keys */
static int ptr_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int slab_callsite_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= slab_callsite_cmp,
};

static int hit_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(void *a, void *b)
{
	double x, y;
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

/* page sort keys */
static int page_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->page < r->page)
		return -1;
	else if (l->page > r->page)
		return 1;
	return 0;
}

static struct sort_dimension page_sort_dimension = {
	.name	= "page",
	.cmp	= page_cmp,
};

static int page_callsite_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->callsite < r->callsite)
		return -1;
	else if (l->callsite > r->callsite)
		return 1;
	return 0;
}

static struct sort_dimension page_callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= page_callsite_cmp,
};

static int page_hit_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->nr_alloc < r->nr_alloc)
		return -1;
	else if (l->nr_alloc > r->nr_alloc)
		return 1;
	return 0;
}

static struct sort_dimension page_hit_sort_dimension = {
	.name	= "hit",
	.cmp	= page_hit_cmp,
};

static int page_bytes_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->alloc_bytes < r->alloc_bytes)
		return -1;
	else if (l->alloc_bytes > r->alloc_bytes)
		return 1;
	return 0;
}

static struct sort_dimension page_bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= page_bytes_cmp,
};

static int page_order_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->order < r->order)
		return -1;
	else if (l->order > r->order)
		return 1;
	return 0;
}

static struct sort_dimension page_order_sort_dimension = {
	.name	= "order",
	.cmp	= page_order_cmp,
};

static int migrate_type_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->migrate_type == -1U)
		return 0;

	if (l->migrate_type < r->migrate_type)
		return -1;
	else if (l->migrate_type > r->migrate_type)
		return 1;
	return 0;
}

static struct sort_dimension migrate_type_sort_dimension = {
	.name	= "migtype",
	.cmp	= migrate_type_cmp,
};

static int gfp_flags_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->gfp_flags == -1U)
		return 0;

	if (l->gfp_flags < r->gfp_flags)
		return -1;
	else if (l->gfp_flags > r->gfp_flags)
		return 1;
	return 0;
}

static struct sort_dimension gfp_flags_sort_dimension = {
	.name	= "gfp",
	.cmp	= gfp_flags_cmp,
};

static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};

static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
		if (!strcmp(slab_sorts[i]->name, tok)) {
			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
		if (!strcmp(page_sorts[i]->name, tok)) {
			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown slab --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown page --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

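/*
 * The --slab/--page and --caller/--alloc option parsers record their
 * relative order on the command line (each flag is set to the other
 * + 1), so whichever mode/view appeared last decides which sort list
 * a --sort option fills.
 */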
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (kmem_page > kmem_slab ||
	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
		if (caller_flag > alloc_flag)
			return setup_page_sorting(&page_caller_sort, arg);
		else
			return setup_page_sorting(&page_alloc_sort, arg);
	} else {
		if (caller_flag > alloc_flag)
			return setup_slab_sorting(&slab_caller_sort, arg);
		else
			return setup_slab_sorting(&slab_alloc_sort, arg);
	}

	return 0;
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

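/*
 * 'perf kmem record' is a thin wrapper around 'perf record' that
 * appends the slab and/or page tracepoints (plus -g for callchains
 * when profiling the page allocator) to the user's arguments.
 */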
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
	"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
	};
	const char * const page_events[] = {
	"-e", "kmem:mm_page_alloc",
	"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab)
		rec_argc += ARRAY_SIZE(slab_events);
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
	}
	if (kmem_page) {
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "kmem.default")) {
		if (!strcmp(value, "slab"))
			kmem_default = KMEM_SLAB;
		else if (!strcmp(value, "page"))
			kmem_default = KMEM_PAGE;
		else
			pr_err("invalid default value ('slab' or 'page' required): %s\n",
			       value);
		return 0;
	}

	return 0;
}

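/*
 * Example usage (a sketch; exact output depends on the kernel and the
 * events recorded):
 *
 *   perf kmem record --slab sleep 1       # record slab events system-wide
 *   perf kmem stat --caller --sort frag,hit
 *   perf kmem record --page sleep 1
 *   perf kmem stat --page --alloc --live
 */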
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_slab_sort = "frag,hit,bytes";
	const char * const default_page_sort = "bytes,hit";
	struct perf_data_file file = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
		     "page, order, migtype, gfp", parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
	OPT_STRING(0, "time", &time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	int ret = -1;
	const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";

	perf_config(kmem_config, NULL);
	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0) {
		if (kmem_default == KMEM_SLAB)
			kmem_slab = 1;
		else
			kmem_page = 1;
	}

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	file.path = input_name;

	kmem_session = session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -1;

	if (kmem_slab) {
		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
							  "kmem:kmalloc")) {
			pr_err(errmsg, "slab", "slab");
			goto out_delete;
		}
	}

	if (kmem_page) {
		struct perf_evsel *evsel;

		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "kmem:mm_page_alloc");
		if (evsel == NULL) {
			pr_err(errmsg, "page", "page");
			goto out_delete;
		}

		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
		symbol_conf.use_callchain = true;
	}

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&ptime, time_str) != 0) {
		pr_err("Invalid time string\n");
		ret = -EINVAL;
		goto out_delete;
	}

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&slab_caller_sort))
			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
		if (list_empty(&slab_alloc_sort))
			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
		if (list_empty(&page_caller_sort))
			setup_page_sorting(&page_caller_sort, default_page_sort);
		if (list_empty(&page_alloc_sort))
			setup_page_sorting(&page_alloc_sort, default_page_sort);

		if (kmem_page) {
			setup_page_sorting(&page_alloc_sort_input,
					   "page,order,migtype,gfp");
			setup_page_sorting(&page_caller_sort_input,
					   "callsite,order,migtype,gfp");
		}
		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}