/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

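/*
 * Slab cache allocation. The requested and allocated sizes are not passed
 * in; they are derived from the cache itself: s->object_size is the object
 * payload, s->size additionally covers metadata and alignment padding.
 *
 * Illustrative emission from an allocator call site (a sketch, not a
 * verbatim call site; the real ones live in the slab allocator):
 *
 *	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp_flags, node);
 */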
TRACE_EVENT(kmem_cache_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= s->object_size;
		__entry->bytes_alloc	= s->size;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);

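/*
 * kmalloc() allocation. Unlike kmem_cache_alloc above, the requested and
 * allocated sizes are passed in explicitly, and "accounted" is not stored
 * in the entry: it is recomputed at print time from the recorded gfp_flags,
 * so only __GFP_ACCOUNT (not SLAB_ACCOUNT) can factor in here.
 *
 * Illustrative emission (a sketch; actual call sites pass whatever sizes
 * the allocator resolved for the request):
 *
 *	trace_kmalloc(_RET_IP_, ret, size, s->size, gfp_flags, node);
 */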
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		(IS_ENABLED(CONFIG_MEMCG) &&
		 (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);

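/*
 * kfree() teardown. Only the call site and the pointer are recorded;
 * sizes are not available here, so tools pair a free with its allocation
 * by matching ptr values in post-processing.
 */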
TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

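/*
 * kmem_cache_free() teardown. The cache name is copied into the entry
 * with __string()/__assign_str() at assign time, since the kmem_cache
 * itself may already be destroyed by the time the ring buffer is read.
 */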
TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),

	TP_ARGS(call_site, ptr, s),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,		s->name		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__assign_str(name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

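/*
 * The page events below store the pfn rather than the struct page pointer
 * so the entry stays meaningful even after the page changes hands; the
 * page=%p shown by TP_printk is recomputed via pfn_to_page() at read time.
 */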
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%u",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

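/*
 * Page allocation. page may be NULL on failure, so a -1UL pfn sentinel is
 * stored and the print side guards against passing it to pfn_to_page().
 */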
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

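/*
 * Event class for per-cpu list refill; mm_page_alloc_zone_locked below
 * instantiates it. DEFINE_EVENT reuses the class's entry layout and print
 * format, which keeps the generated code smaller than a full TRACE_EVENT
 * per event.
 */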
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
		__field(	int,		percpu_refill	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
		__entry->percpu_refill	= percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);

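/*
 * Drain of a per-cpu page list back to the buddy allocator. The same
 * -1UL pfn sentinel convention as mm_page_alloc applies, so the print
 * side guards against it as well.
 */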
TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order, __entry->migratetype)
);

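/*
 * Allocation fell back to a different migratetype. fragmenting=1 means
 * the fallback order is below pageblock_order, i.e. the pageblock now
 * mixes migratetypes; change_ownership=1 means the whole pageblock was
 * converted to the allocation's migratetype.
 */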
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

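/*
 * Summary of the migration pass of a contiguous-range allocation
 * (alloc_contig_range() and friends); only built with CONFIG_CONTIG_ALLOC.
 */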
#ifdef CONFIG_CONTIG_ALLOC
TRACE_EVENT(mm_alloc_contig_migrate_range_info,

	TP_PROTO(unsigned long start,
		 unsigned long end,
		 unsigned long nr_migrated,
		 unsigned long nr_reclaimed,
		 unsigned long nr_mapped,
		 acr_flags_t alloc_flags),

	TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, alloc_flags),

	TP_STRUCT__entry(
		__field(unsigned long, start)
		__field(unsigned long, end)
		__field(unsigned long, nr_migrated)
		__field(unsigned long, nr_reclaimed)
		__field(unsigned long, nr_mapped)
		__field(acr_flags_t, alloc_flags)
	),

	TP_fast_assign(
		__entry->start = start;
		__entry->end = end;
		__entry->nr_migrated = nr_migrated;
		__entry->nr_reclaimed = nr_reclaimed;
		__entry->nr_mapped = nr_mapped;
		__entry->alloc_flags = alloc_flags;
	),

	TP_printk("start=0x%lx end=0x%lx alloc_flags=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
		  __entry->start,
		  __entry->end,
		  __entry->alloc_flags,
		  __entry->nr_migrated,
		  __entry->nr_reclaimed,
		  __entry->nr_mapped)
);
#endif

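/*
 * Snapshot of a zone's min/low/high/promo watermarks, emitted when they
 * are recalculated (e.g. after a min_free_kbytes change).
 */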
TRACE_EVENT(mm_setup_per_zone_wmarks,

	TP_PROTO(struct zone *zone),

	TP_ARGS(zone),

	TP_STRUCT__entry(
		__field(int, node_id)
		__string(name, zone->name)
		__field(unsigned long, watermark_min)
		__field(unsigned long, watermark_low)
		__field(unsigned long, watermark_high)
		__field(unsigned long, watermark_promo)
	),

	TP_fast_assign(
		__entry->node_id = zone->zone_pgdat->node_id;
		__assign_str(name);
		__entry->watermark_min = zone->_watermark[WMARK_MIN];
		__entry->watermark_low = zone->_watermark[WMARK_LOW];
		__entry->watermark_high = zone->_watermark[WMARK_HIGH];
		__entry->watermark_promo = zone->_watermark[WMARK_PROMO];
	),

	TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu",
		  __entry->node_id,
		  __get_str(name),
		  __entry->watermark_min,
		  __entry->watermark_low,
		  __entry->watermark_high,
		  __entry->watermark_promo)
);

TRACE_EVENT(mm_setup_per_zone_lowmem_reserve,

	TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),

	TP_ARGS(zone, upper_zone, lowmem_reserve),

	TP_STRUCT__entry(
		__field(int, node_id)
		__string(name, zone->name)
		__string(upper_name, upper_zone->name)
		__field(long, lowmem_reserve)
	),

	TP_fast_assign(
		__entry->node_id = zone->zone_pgdat->node_id;
		__assign_str(name);
		__assign_str(upper_name);
		__entry->lowmem_reserve = lowmem_reserve;
	),

	TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld",
		  __entry->node_id,
		  __get_str(name),
		  __get_str(upper_name),
		  __entry->lowmem_reserve)
);

TRACE_EVENT(mm_calculate_totalreserve_pages,

	TP_PROTO(unsigned long totalreserve_pages),

	TP_ARGS(totalreserve_pages),

	TP_STRUCT__entry(
		__field(unsigned long, totalreserve_pages)
	),

	TP_fast_assign(
		__entry->totalreserve_pages = totalreserve_pages;
	),

	TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

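/*
 * Two-pass macro trick: TRACE_MM_PAGES lists the rss counters once, and
 * EM()/EMe() are redefined before each expansion. The first pass emits
 * TRACE_DEFINE_ENUM() so user space can resolve the enum values; the
 * second builds the { value, "name" } pairs consumed by __print_symbolic()
 * in rss_stat below, expanding roughly to:
 *
 *	{ MM_FILEPAGES, "MM_FILEPAGES" }, ..., { MM_SHMEMPAGES, "MM_SHMEMPAGES" }
 */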
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }

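/*
 * mm_id is a hashed mm_struct pointer (see mm_ptr_to_hash above) so the
 * owning mm can be correlated across events without exposing a kernel
 * address; size converts the per-mm counter from pages to bytes.
 */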
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member),

	TP_ARGS(mm, member),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
							    << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>