/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective of its use,
 * i.e. if you're going to use it, please supply a *good* justification in
 * your commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init	__section(".data..ro_after_init")
#endif
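/*
 * Illustrative example (the variable names below are made up, not part
 * of this header): a tunable that is read in hot paths but written
 * rarely is a candidate for __read_mostly, while a table that is
 * populated once during boot and never written again is a candidate
 * for __ro_after_init:
 *
 *	static int sysctl_foo __read_mostly = 1;
 *	static unsigned long foo_lookup_table[16] __ro_after_init;
 */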
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes/L3 cacheline
 * size etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif

/*
 * Zero-sized markers and build-time asserts for keeping a group of hot
 * struct members together on the same cacheline(s).
 */
#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
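/*
 * Illustrative example (the struct and function names below are made
 * up, not part of this header): hot members are bracketed by the group
 * markers, and the asserts are placed in some init function to catch
 * layout regressions at build time. CACHELINE_PADDING() (defined
 * below) can be used to push whatever follows a group onto a fresh
 * cacheline:
 *
 *	struct foo_stats {
 *		__cacheline_group_begin(hot);
 *		unsigned long	packets;
 *		unsigned long	bytes;
 *		__cacheline_group_end(hot);
 *		CACHELINE_PADDING(_pad_);
 *		unsigned long	rare_errors;
 *	};
 *
 *	static void foo_stats_layout_check(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_stats, hot, packets);
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_stats, hot, bytes);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct foo_stats, hot,
 *					    SMP_CACHE_BYTES);
 *	}
 *
 * The marker arrays are zero length, so the group costs no space; the
 * asserts fail the build if a member moves out of its group or the
 * group grows past the given size.
 */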
/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)	struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif

#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif

#endif /* __LINUX_CACHE_H */