/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/**
 * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
 * @x: value to align
 *
 * On some architectures, the L2 ("SMP") cacheline size is bigger than the L1
 * one, and sometimes this needs to be accounted for.
 *
 * Return: aligned value.
 */
#ifndef SMP_CACHE_ALIGN
#define SMP_CACHE_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#endif
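
/*
 * For instance, a driver sizing ring-buffer slots might round the element
 * size up so that consecutive elements never share a cacheline (illustrative
 * sketch; ``pkt_size`` is a made-up variable):
 *
 *	size_t slot_size = SMP_CACHE_ALIGN(pkt_size);
 */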

/*
 * ``__aligned_largest`` aligns a field to the value that is optimal for the
 * target architecture's memory operations. __LARGEST_ALIGN exposes that
 * value so it can be used anywhere else.
 */
#ifndef __LARGEST_ALIGN
#define __LARGEST_ALIGN		sizeof(struct { long x; } __aligned_largest)
#endif

#ifndef LARGEST_ALIGN
#define LARGEST_ALIGN(x)	ALIGN(x, __LARGEST_ALIGN)
#endif
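
/*
 * A hypothetical use (``hdr_len`` is a made-up variable): round a header
 * length up so that the data following it starts on a boundary suitable for
 * the widest memory accesses:
 *
 *	size_t data_off = LARGEST_ALIGN(hdr_len);
 */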

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is read
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective of its use,
 * i.e. if you're going to use it, please supply a *good* justification in
 * your commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
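
/*
 * Usage sketch (the variable name is illustrative): a tunable written once
 * during boot and then only read in the fast path:
 *
 *	static int foo_fastpath_enabled __read_mostly = 1;
 */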

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif
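
/*
 * Usage sketch (the variable and helper names are illustrative): the value
 * is assigned once from an __init function and never written again:
 *
 *	static unsigned long foo_feature_mask __ro_after_init;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_feature_mask = foo_probe_features();
 *		return 0;
 *	}
 */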

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
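
/*
 * Usage sketch (names are illustrative). The four-underscore forms only add
 * the alignment attribute and so are suitable for struct members; the
 * two-underscore forms also place the object in .data..cacheline_aligned and
 * are therefore meant for variable definitions:
 *
 *	struct foo_queue {
 *		u64 rx_packets;
 *		spinlock_t lock ____cacheline_aligned_in_smp;
 *	};
 *
 *	static struct foo_queue foo_global_queue __cacheline_aligned_in_smp;
 */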

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, L3 cacheline
 * size, etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

/**
 * __cacheline_group_begin_aligned - declare an aligned group start
 * @GROUP: name of the group
 * @...: optional group alignment
 *
 * The following block inside a struct:
 *
 *	__cacheline_group_begin_aligned(grp);
 *	field a;
 *	field b;
 *	__cacheline_group_end_aligned(grp);
 *
 * will always be aligned to either the specified alignment or
 * ``SMP_CACHE_BYTES``.
 */
#define __cacheline_group_begin_aligned(GROUP, ...)		\
	__cacheline_group_begin(GROUP)				\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)

/**
 * __cacheline_group_end_aligned - declare an aligned group end
 * @GROUP: name of the group
 * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
 *
 * Note that the end marker is aligned to sizeof(long) to allow more precise
 * size assertion. It also declares padding at the end to avoid the next
 * field falling into this cacheline.
 */
#define __cacheline_group_end_aligned(GROUP, ...)		\
	__cacheline_group_end(GROUP) __aligned(sizeof(long));	\
	struct { } __cacheline_group_pad__##GROUP		\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
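
/*
 * Putting the group helpers together (illustrative struct and function
 * names): declare a hot group and let the build fail if a member leaves the
 * group or the group outgrows one cacheline:
 *
 *	struct foo_priv {
 *		__cacheline_group_begin_aligned(hot);
 *		u32 budget;
 *		u32 flags;
 *		__cacheline_group_end_aligned(hot);
 *		u64 slow_path_counter;
 *	};
 *
 *	static void foo_assert_layout(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_priv, hot, budget);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct foo_priv, hot,
 *					    SMP_CACHE_BYTES);
 *	}
 */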

/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)		struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif
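
/*
 * Usage sketch (illustrative struct): keep fields written by the producer
 * and fields written by the consumer on separate cachelines:
 *
 *	struct foo_ring {
 *		unsigned int head;
 *		CACHELINE_PADDING(_pad1_);
 *		unsigned int tail;
 *	};
 */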

#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif
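
/*
 * Usage sketch (illustrative struct): a driver embedding a DMA-able buffer
 * in a larger structure may align it to ARCH_DMA_MINALIGN so the buffer
 * does not share a cacheline with CPU-written fields:
 *
 *	struct foo_cmd {
 *		spinlock_t lock;
 *		u8 dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 */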

#endif /* __LINUX_CACHE_H */