xref: /linux/arch/sh/include/asm/cache.h (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
3  *
4  * include/asm-sh/cache.h
5  *
6  * Copyright 1999 (C) Niibe Yutaka
7  * Copyright 2002, 2003 (C) Paul Mundt
8  */
9 #ifndef __ASM_SH_CACHE_H
10 #define __ASM_SH_CACHE_H
11 
12 #include <linux/init.h>
13 #include <cpu/cache.h>
14 
/* L1 cache line size in bytes; L1_CACHE_SHIFT is provided per-CPU by <cpu/cache.h> */
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/* Group rarely-written data into its own section (.data..read_mostly) */
#define __read_mostly __section(".data..read_mostly")
24 
25 #ifndef __ASSEMBLY__
/*
 * Per-cache descriptor for the SH cache geometry.
 * NOTE(review): fields appear to be populated by the CPU probe code
 * elsewhere in arch/sh — confirm against the probe routines.
 */
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;	/* NOTE(review): presumably shift to derive the entry index from an address — confirm */
	unsigned int entry_mask;	/* NOTE(review): presumably mask over the entry index bits — confirm */

	/*
	 * Mask selecting the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;
	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};
51 #endif /* __ASSEMBLY__ */
52 #endif /* __ASM_SH_CACHE_H */
53