/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *  GFP_KERNEL|__GFP_HARDWALL
 *
 * Thus the masks with the most bits set go first.
 */
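
/*
 * Illustrative example: with the correct ordering, i.e. GFP_USER listed
 * before GFP_KERNEL in __def_gfpflag_names below, __print_flags()
 * consumes the whole GFP_USER mask first, so a GFP_USER allocation
 * decodes simply as "GFP_USER" instead of "GFP_KERNEL|__GFP_HARDWALL".
 */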

/* These define the GFP flags that are individual enum bits (___GFP_*_BIT) */
#define TRACE_GFP_FLAGS_GENERAL			\
	TRACE_GFP_EM(DMA)			\
	TRACE_GFP_EM(HIGHMEM)			\
	TRACE_GFP_EM(DMA32)			\
	TRACE_GFP_EM(MOVABLE)			\
	TRACE_GFP_EM(RECLAIMABLE)		\
	TRACE_GFP_EM(HIGH)			\
	TRACE_GFP_EM(IO)			\
	TRACE_GFP_EM(FS)			\
	TRACE_GFP_EM(ZERO)			\
	TRACE_GFP_EM(DIRECT_RECLAIM)		\
	TRACE_GFP_EM(KSWAPD_RECLAIM)		\
	TRACE_GFP_EM(WRITE)			\
	TRACE_GFP_EM(NOWARN)			\
	TRACE_GFP_EM(RETRY_MAYFAIL)		\
	TRACE_GFP_EM(NOFAIL)			\
	TRACE_GFP_EM(NORETRY)			\
	TRACE_GFP_EM(MEMALLOC)			\
	TRACE_GFP_EM(COMP)			\
	TRACE_GFP_EM(NOMEMALLOC)		\
	TRACE_GFP_EM(HARDWALL)			\
	TRACE_GFP_EM(THISNODE)			\
	TRACE_GFP_EM(ACCOUNT)			\
	TRACE_GFP_EM(ZEROTAGS)

#ifdef CONFIG_KASAN_HW_TAGS
# define TRACE_GFP_FLAGS_KASAN			\
	TRACE_GFP_EM(SKIP_ZERO)			\
	TRACE_GFP_EM(SKIP_KASAN)
#else
# define TRACE_GFP_FLAGS_KASAN
#endif

#ifdef CONFIG_LOCKDEP
# define TRACE_GFP_FLAGS_LOCKDEP		\
	TRACE_GFP_EM(NOLOCKDEP)
#else
# define TRACE_GFP_FLAGS_LOCKDEP
#endif

#ifdef CONFIG_SLAB_OBJ_EXT
# define TRACE_GFP_FLAGS_SLAB			\
	TRACE_GFP_EM(NO_OBJ_EXT)
#else
# define TRACE_GFP_FLAGS_SLAB
#endif

#define TRACE_GFP_FLAGS				\
	TRACE_GFP_FLAGS_GENERAL			\
	TRACE_GFP_FLAGS_KASAN			\
	TRACE_GFP_FLAGS_LOCKDEP			\
	TRACE_GFP_FLAGS_SLAB

#undef TRACE_GFP_EM
#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT);

TRACE_GFP_FLAGS

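/*
 * With TRACE_GFP_EM() defined as above, the TRACE_GFP_FLAGS expansion
 * emits one TRACE_DEFINE_ENUM(___GFP_<flag>_BIT) statement per flag,
 * e.g. TRACE_DEFINE_ENUM(___GFP_DMA_BIT);, so that userspace tools
 * parsing the trace event print fmt can resolve these enum names to
 * their numeric values.
 */
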
/* Just in case these are ever used */
TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT);
TRACE_DEFINE_ENUM(___GFP_LAST_BIT);

#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}

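/*
 * gfpflag_string() builds a { value, "name" } pair for __print_flags();
 * the __force cast is needed because gfp_t is a __bitwise type and
 * sparse would otherwise warn about the conversion to unsigned long.
 */
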
#define __def_gfpflag_names			\
	gfpflag_string(GFP_TRANSHUGE),		\
	gfpflag_string(GFP_TRANSHUGE_LIGHT),	\
	gfpflag_string(GFP_HIGHUSER_MOVABLE),	\
	gfpflag_string(GFP_HIGHUSER),		\
	gfpflag_string(GFP_USER),		\
	gfpflag_string(GFP_KERNEL_ACCOUNT),	\
	gfpflag_string(GFP_KERNEL),		\
	gfpflag_string(GFP_NOFS),		\
	gfpflag_string(GFP_ATOMIC),		\
	gfpflag_string(GFP_NOIO),		\
	gfpflag_string(GFP_NOWAIT),		\
	gfpflag_string(GFP_DMA),		\
	gfpflag_string(__GFP_HIGHMEM),		\
	gfpflag_string(GFP_DMA32),		\
	gfpflag_string(__GFP_HIGH),		\
	gfpflag_string(__GFP_IO),		\
	gfpflag_string(__GFP_FS),		\
	gfpflag_string(__GFP_NOWARN),		\
	gfpflag_string(__GFP_RETRY_MAYFAIL),	\
	gfpflag_string(__GFP_NOFAIL),		\
	gfpflag_string(__GFP_NORETRY),		\
	gfpflag_string(__GFP_COMP),		\
	gfpflag_string(__GFP_ZERO),		\
	gfpflag_string(__GFP_NOMEMALLOC),	\
	gfpflag_string(__GFP_MEMALLOC),		\
	gfpflag_string(__GFP_HARDWALL),		\
	gfpflag_string(__GFP_THISNODE),		\
	gfpflag_string(__GFP_RECLAIMABLE),	\
	gfpflag_string(__GFP_MOVABLE),		\
	gfpflag_string(__GFP_ACCOUNT),		\
	gfpflag_string(__GFP_WRITE),		\
	gfpflag_string(__GFP_RECLAIM),		\
	gfpflag_string(__GFP_DIRECT_RECLAIM),	\
	gfpflag_string(__GFP_KSWAPD_RECLAIM),	\
	gfpflag_string(__GFP_ZEROTAGS)

#ifdef CONFIG_KASAN_HW_TAGS
#define __def_gfpflag_names_kasan ,			\
	gfpflag_string(__GFP_SKIP_ZERO),		\
	gfpflag_string(__GFP_SKIP_KASAN)
#else
#define __def_gfpflag_names_kasan
#endif

#define show_gfp_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_gfpflag_names __def_gfpflag_names_kasan			\
	) : "none"

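/*
 * Illustrative usage: a tracepoint's TP_printk() can decode a gfp mask
 * with this helper, e.g.
 *
 *	TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * which prints something like "GFP_KERNEL|__GFP_ZERO", or "none" when
 * no flags are set.
 */
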
#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
#define IF_HAVE_PG_MLOCK(_name)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
#define IF_HAVE_PG_HWPOISON(_name)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
#define IF_HAVE_PG_IDLE(_name)
#endif

#ifdef CONFIG_ARCH_USES_PG_ARCH_2
#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
#define IF_HAVE_PG_ARCH_2(_name)
#endif

#ifdef CONFIG_ARCH_USES_PG_ARCH_3
#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
#define IF_HAVE_PG_ARCH_3(_name)
#endif

#define DEF_PAGEFLAG_NAME(_name) { 1UL <<  PG_##_name, __stringify(_name) }

#define __def_pageflag_names						\
	DEF_PAGEFLAG_NAME(locked),					\
	DEF_PAGEFLAG_NAME(waiters),					\
	DEF_PAGEFLAG_NAME(referenced),					\
	DEF_PAGEFLAG_NAME(uptodate),					\
	DEF_PAGEFLAG_NAME(dirty),					\
	DEF_PAGEFLAG_NAME(lru),						\
	DEF_PAGEFLAG_NAME(active),					\
	DEF_PAGEFLAG_NAME(workingset),					\
	DEF_PAGEFLAG_NAME(owner_priv_1),				\
	DEF_PAGEFLAG_NAME(owner_2),					\
	DEF_PAGEFLAG_NAME(arch_1),					\
	DEF_PAGEFLAG_NAME(reserved),					\
	DEF_PAGEFLAG_NAME(private),					\
	DEF_PAGEFLAG_NAME(private_2),					\
	DEF_PAGEFLAG_NAME(writeback),					\
	DEF_PAGEFLAG_NAME(head),					\
	DEF_PAGEFLAG_NAME(reclaim),					\
	DEF_PAGEFLAG_NAME(swapbacked),					\
	DEF_PAGEFLAG_NAME(unevictable)					\
IF_HAVE_PG_MLOCK(mlocked)						\
IF_HAVE_PG_HWPOISON(hwpoison)						\
IF_HAVE_PG_IDLE(idle)							\
IF_HAVE_PG_IDLE(young)							\
IF_HAVE_PG_ARCH_2(arch_2)						\
IF_HAVE_PG_ARCH_3(arch_3)

#define show_page_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_pageflag_names						\
	) : "none"

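/*
 * Illustrative usage: a tracepoint can decode page/folio flag bits with
 * this helper, e.g.
 *
 *	TP_printk("page_flags=%s", show_page_flags(__entry->flags))
 *
 * Note that the IF_HAVE_PG_* entries above carry their own leading
 * comma, so optional flags drop out of the list cleanly when the
 * corresponding config option is disabled.
 */
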
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT,     "pat"           }
#elif defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO,     "sao"           }
#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP,	"growsup"	}
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy"	}
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1,	"arch_1"	}
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name},
#else
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif

#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
#else
# define IF_HAVE_VM_DROPPABLE(flag, name)
#endif

#define __def_vmaflag_names						\
	{VM_READ,			"read"		},		\
	{VM_WRITE,			"write"		},		\
	{VM_EXEC,			"exec"		},		\
	{VM_SHARED,			"shared"	},		\
	{VM_MAYREAD,			"mayread"	},		\
	{VM_MAYWRITE,			"maywrite"	},		\
	{VM_MAYEXEC,			"mayexec"	},		\
	{VM_MAYSHARE,			"mayshare"	},		\
	{VM_GROWSDOWN,			"growsdown"	},		\
	{VM_UFFD_MISSING,		"uffd_missing"	},		\
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR,	"uffd_minor"	)		\
	{VM_PFNMAP,			"pfnmap"	},		\
	{VM_UFFD_WP,			"uffd_wp"	},		\
	{VM_LOCKED,			"locked"	},		\
	{VM_IO,				"io"		},		\
	{VM_SEQ_READ,			"seqread"	},		\
	{VM_RAND_READ,			"randread"	},		\
	{VM_DONTCOPY,			"dontcopy"	},		\
	{VM_DONTEXPAND,			"dontexpand"	},		\
	{VM_LOCKONFAULT,		"lockonfault"	},		\
	{VM_ACCOUNT,			"account"	},		\
	{VM_NORESERVE,			"noreserve"	},		\
	{VM_HUGETLB,			"hugetlb"	},		\
	{VM_SYNC,			"sync"		},		\
	__VM_ARCH_SPECIFIC_1				,		\
	{VM_WIPEONFORK,			"wipeonfork"	},		\
	{VM_DONTDUMP,			"dontdump"	},		\
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY,	"softdirty"	)		\
	{VM_MIXEDMAP,			"mixedmap"	},		\
	{VM_HUGEPAGE,			"hugepage"	},		\
	{VM_NOHUGEPAGE,			"nohugepage"	},		\
IF_HAVE_VM_DROPPABLE(VM_DROPPABLE,	"droppable"	)		\
	{VM_MERGEABLE,			"mergeable"	}		\

#define show_vma_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_vmaflag_names						\
	) : "none"

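/*
 * Illustrative usage: a tracepoint can decode vma->vm_flags with this
 * helper, e.g.
 *
 *	TP_printk("vm_flags=%s", show_vma_flags(__entry->vm_flags))
 *
 * yielding output such as "read|write|mayread|maywrite|mayexec".
 */
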
#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS					\
	EM( COMPACT_SKIPPED,		"skipped")		\
	EM( COMPACT_DEFERRED,		"deferred")		\
	EM( COMPACT_CONTINUE,		"continue")		\
	EM( COMPACT_SUCCESS,		"success")		\
	EM( COMPACT_PARTIAL_SKIPPED,	"partial_skipped")	\
	EM( COMPACT_COMPLETE,		"complete")		\
	EM( COMPACT_NO_SUITABLE_PAGE,	"no_suitable_page")	\
	EM( COMPACT_NOT_SUITABLE_ZONE,	"not_suitable_zone")	\
	EMe(COMPACT_CONTENDED,		"contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED	1
#define COMPACTION_WITHDRAWN	2
#define COMPACTION_PROGRESS	3

#define compact_result_to_feedback(result)	\
({						\
	enum compact_result __result = result;	\
	(__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \
		(__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \
})

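/*
 * compact_result_to_feedback() collapses the detailed enum compact_result
 * into three coarse buckets: only COMPACT_COMPLETE counts as a failure
 * (compaction ran to completion without forming the requested page),
 * COMPACT_SUCCESS counts as progress, and everything else (skipped,
 * deferred, contended, ...) counts as withdrawn. For example,
 * compact_result_to_feedback(COMPACT_DEFERRED) evaluates to
 * COMPACTION_WITHDRAWN.
 */
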
#define COMPACTION_FEEDBACK		\
	EM(COMPACTION_FAILED,		"failed")	\
	EM(COMPACTION_WITHDRAWN,	"withdrawn")	\
	EMe(COMPACTION_PROGRESS,	"progress")

#define COMPACTION_PRIORITY						\
	EM(COMPACT_PRIO_SYNC_FULL,	"COMPACT_PRIO_SYNC_FULL")	\
	EM(COMPACT_PRIO_SYNC_LIGHT,	"COMPACT_PRIO_SYNC_LIGHT")	\
	EMe(COMPACT_PRIO_ASYNC,		"COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE						\
	IFDEF_ZONE_DMA(		EM (ZONE_DMA,	 "DMA"))	\
	IFDEF_ZONE_DMA32(	EM (ZONE_DMA32,	 "DMA32"))	\
				EM (ZONE_NORMAL, "Normal")	\
	IFDEF_ZONE_HIGHMEM(	EM (ZONE_HIGHMEM,"HighMem"))	\
				EMe(ZONE_MOVABLE,"Movable")

#define LRU_NAMES		\
		EM (LRU_INACTIVE_ANON, "inactive_anon") \
		EM (LRU_ACTIVE_ANON, "active_anon") \
		EM (LRU_INACTIVE_FILE, "inactive_file") \
		EM (LRU_ACTIVE_FILE, "active_file") \
		EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK values are plain #defines, not enums, so they are not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}
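
/*
 * Illustrative usage: with EM()/EMe() now expanding to { value, "string" }
 * pairs, these tables are typically consumed by __print_symbolic() in a
 * TP_printk(), e.g.
 *
 *	__print_symbolic(__entry->status, COMPACTION_STATUS)
 *	__print_symbolic(__entry->lru, LRU_NAMES)
 */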
359