/* xref: /linux/include/linux/cache.h (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_CACHE_H
3 #define __LINUX_CACHE_H
4 
5 #include <uapi/linux/kernel.h>
6 #include <asm/cache.h>
7 
8 #ifndef L1_CACHE_ALIGN
9 #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
10 #endif
11 
12 #ifndef SMP_CACHE_BYTES
13 #define SMP_CACHE_BYTES L1_CACHE_BYTES
14 #endif
15 
16 /*
17  * __read_mostly is used to keep rarely changing variables out of frequently
18  * updated cachelines. Its use should be reserved for data that is used
19  * frequently in hot paths. Performance traces can help decide when to use
20  * this. You want __read_mostly data to be tightly packed, so that in the
21  * best case multiple frequently read variables for a hot path will be next
22  * to each other in order to reduce the number of cachelines needed to
23  * execute a critical path. We should be mindful and selective of its use.
24  * ie: if you're going to use it please supply a *good* justification in your
25  * commit log
26  */
27 #ifndef __read_mostly
28 #define __read_mostly
29 #endif
30 
31 /*
32  * __ro_after_init is used to mark things that are read-only after init (i.e.
33  * after mark_rodata_ro() has been called). These are effectively read-only,
34  * but may get written to during init, so can't live in .rodata (via "const").
35  */
36 #ifndef __ro_after_init
37 #define __ro_after_init __section(".data..ro_after_init")
38 #endif
39 
40 #ifndef ____cacheline_aligned
41 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
42 #endif
43 
44 #ifndef ____cacheline_aligned_in_smp
45 #ifdef CONFIG_SMP
46 #define ____cacheline_aligned_in_smp ____cacheline_aligned
47 #else
48 #define ____cacheline_aligned_in_smp
49 #endif /* CONFIG_SMP */
50 #endif
51 
52 #ifndef __cacheline_aligned
53 #define __cacheline_aligned					\
54   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
55 		 __section__(".data..cacheline_aligned")))
56 #endif /* __cacheline_aligned */
57 
58 #ifndef __cacheline_aligned_in_smp
59 #ifdef CONFIG_SMP
60 #define __cacheline_aligned_in_smp __cacheline_aligned
61 #else
62 #define __cacheline_aligned_in_smp
63 #endif /* CONFIG_SMP */
64 #endif
65 
66 /*
67  * The maximum alignment needed for some critical structures
68  * These could be inter-node cacheline sizes/L3 cacheline
69  * size etc.  Define this in asm/cache.h for your arch
70  */
71 #ifndef INTERNODE_CACHE_SHIFT
72 #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
73 #endif
74 
75 #if !defined(____cacheline_internodealigned_in_smp)
76 #if defined(CONFIG_SMP)
77 #define ____cacheline_internodealigned_in_smp \
78 	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
79 #else
80 #define ____cacheline_internodealigned_in_smp
81 #endif
82 #endif
83 
84 #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
85 #define cache_line_size()	L1_CACHE_BYTES
86 #endif
87 
88 #endif /* __LINUX_CACHE_H */
89