/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

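/*
 * Fired on each successful kmem_cache_alloc()/kmem_cache_alloc_node().
 * Records the call site, the returned object, the cache name, the
 * requested vs. actual object size, the GFP flags and NUMA node, and
 * whether the allocation is memcg-accounted (__GFP_ACCOUNT on the call
 * or SLAB_ACCOUNT on the cache).
 */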
TRACE_EVENT(kmem_cache_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, gfp_flags, node),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__string( name, s->name )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( unsigned long, gfp_flags )
		__field( int, node )
		__field( bool, accounted )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__assign_str(name);
		__entry->bytes_req = s->object_size;
		__entry->bytes_alloc = s->size;
		__entry->gfp_flags = (__force unsigned long)gfp_flags;
		__entry->node = node;
		__entry->accounted = IS_ENABLED(CONFIG_MEMCG) ?
				     ((gfp_flags & __GFP_ACCOUNT) ||
				      (s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p name=%s bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__get_str(name),
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);

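/*
 * Fired on each successful kmalloc()/kmalloc_node(). Unlike
 * kmem_cache_alloc above, the requested and allocated sizes come from
 * the caller, and "accounted" is derived from __GFP_ACCOUNT at print
 * time instead of being stored in the ring buffer.
 */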
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( unsigned long, gfp_flags )
		__field( int, node )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = (__force unsigned long)gfp_flags;
		__entry->node = node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		(IS_ENABLED(CONFIG_MEMCG) &&
		 (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);

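/* Fired from kfree(); records only the call site and the freed pointer. */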
TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

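/*
 * Fired from kmem_cache_free(); like kfree above, but also records the
 * name of the cache the object is returned to.
 */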
TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),

	TP_ARGS(call_site, ptr, s),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__string( name, s->name )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__assign_str(name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

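/*
 * Fired when pages are handed back to the page allocator. Only the PFN
 * is stored; the struct page pointer shown in the output is recomputed
 * with pfn_to_page() at print time.
 */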
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->order = order;
	),

	TP_printk("page=%p pfn=0x%lx order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

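/* Fired per page for order-0 pages freed through the batched free path. */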
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

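/*
 * Fired for every page allocation request. On failure page is NULL:
 * the PFN is then stored as -1UL and printed as a NULL page with
 * pfn=0.
 */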
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( unsigned long, gfp_flags )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->gfp_flags = (__force unsigned long)gfp_flags;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

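/*
 * Event class for allocator-internal per-page events. percpu_refill
 * indicates the page was obtained while refilling a per-CPU page list
 * rather than for an individual request.
 */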
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
		__field( int, percpu_refill )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
		__entry->percpu_refill = percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

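/* Fired when a page is taken straight from the buddy free lists with the zone lock held. */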
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);

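/* Fired as pages are drained from a per-CPU page list back into the buddy free lists. */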
TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

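/*
 * Fired when an allocation cannot be satisfied from its preferred
 * migratetype and falls back to another. fragmenting is computed at
 * print time as fallback_order < pageblock_order (a sub-pageblock
 * fallback can mix migratetypes within a pageblock), and
 * change_ownership reports whether the pageblock now belongs to the
 * allocating migratetype.
 */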
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( int, alloc_order )
		__field( int, fallback_order )
		__field( int, alloc_migratetype )
		__field( int, fallback_migratetype )
		__field( int, change_ownership )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->alloc_order = alloc_order;
		__entry->fallback_order = fallback_order;
		__entry->alloc_migratetype = alloc_migratetype;
		__entry->fallback_migratetype = fallback_migratetype;
		__entry->change_ownership = (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

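/*
 * Fired when the per-zone watermarks are (re)computed; reports the
 * resulting min/low/high/promo watermarks for each zone.
 */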
TRACE_EVENT(mm_setup_per_zone_wmarks,

	TP_PROTO(struct zone *zone),

	TP_ARGS(zone),

	TP_STRUCT__entry(
		__field(int, node_id)
		__string(name, zone->name)
		__field(unsigned long, watermark_min)
		__field(unsigned long, watermark_low)
		__field(unsigned long, watermark_high)
		__field(unsigned long, watermark_promo)
	),

	TP_fast_assign(
		__entry->node_id = zone->zone_pgdat->node_id;
		__assign_str(name);
		__entry->watermark_min = zone->_watermark[WMARK_MIN];
		__entry->watermark_low = zone->_watermark[WMARK_LOW];
		__entry->watermark_high = zone->_watermark[WMARK_HIGH];
		__entry->watermark_promo = zone->_watermark[WMARK_PROMO];
	),

	TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu",
		  __entry->node_id,
		  __get_str(name),
		  __entry->watermark_min,
		  __entry->watermark_low,
		  __entry->watermark_high,
		  __entry->watermark_promo)
);

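/*
 * Fired when the lowmem_reserve levels are recomputed; one event per
 * (zone, upper_zone) pair, with the reserve expressed in pages.
 */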
TRACE_EVENT(mm_setup_per_zone_lowmem_reserve,

	TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),

	TP_ARGS(zone, upper_zone, lowmem_reserve),

	TP_STRUCT__entry(
		__field(int, node_id)
		__string(name, zone->name)
		__string(upper_name, upper_zone->name)
		__field(long, lowmem_reserve)
	),

	TP_fast_assign(
		__entry->node_id = zone->zone_pgdat->node_id;
		__assign_str(name);
		__assign_str(upper_name);
		__entry->lowmem_reserve = lowmem_reserve;
	),

	TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld",
		  __entry->node_id,
		  __get_str(name),
		  __get_str(upper_name),
		  __entry->lowmem_reserve)
);

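/* Fired when the global totalreserve_pages estimate is recalculated. */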
TRACE_EVENT(mm_calculate_totalreserve_pages,

	TP_PROTO(unsigned long totalreserve_pages),

	TP_ARGS(totalreserve_pages),

	TP_STRUCT__entry(
		__field(unsigned long, totalreserve_pages)
	),

	TP_fast_assign(
		__entry->totalreserve_pages = totalreserve_pages;
	),

	TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages)
);

/*
 * Required for uniquely and securely identifying the mm in the
 * rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

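/*
 * Standard trace-event enum plumbing: first expand each counter
 * through TRACE_DEFINE_ENUM() so user space can resolve the values,
 * then redefine EM()/EMe() to emit the { value, "name" } table that
 * __print_symbolic() uses in rss_stat below.
 */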
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }

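/*
 * Fired when one of the mm's RSS counters is updated. mm_id is a
 * hashed mm pointer (stable for a given mm but not reversible), curr
 * flags whether the mm belongs to the current task, and size is the
 * counter value converted to bytes.
 */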
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member),

	TP_ARGS(mm, member),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
							    << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>