xref: /linux/arch/powerpc/include/asm/fixmap.h (revision d4b996f9ef1fe83d9ce9ad5c1ca0bd8231638ce5)
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Copyright 2008 Freescale Semiconductor Inc.
 *   Port to powerpc added by Kumar Gala
 */

#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

#ifndef __ASSEMBLY__
#include <linux/sizes.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_size.h>
#endif

#ifdef CONFIG_PPC64
#define FIXADDR_TOP	(IOREMAP_END + FIXADDR_SIZE)
#else
#define FIXADDR_SIZE	0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
#endif
#endif
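
/*
 * Worked example of the values above (a sketch; exact numbers depend
 * on PAGE_SIZE and, on PPC64, on the platform's IOREMAP_END and
 * FIXADDR_SIZE):
 *
 *   PPC32, 4K pages, no KASAN: FIXADDR_TOP = (unsigned long)(-4096)
 *                                          = 0xfffff000
 *   PPC32 with KASAN:          FIXADDR_TOP sits one guard page below
 *                              KASAN_SHADOW_START
 *   PPC64:                     the fixmap is carved out of the space
 *                              directly above IOREMAP_END
 */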

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * during boot. We allocate these special addresses
 * from the top of the fixmap area (FIXADDR_TOP) downwards.
 * This also makes vmalloc() fail-safe: we can guarantee
 * that these special addresses and vmalloc()-ed addresses
 * never overlap.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size pages (PAGE_SIZE each, or larger if an entry
 * spans more than one page). Use set_fixmap(idx, phys) to
 * associate physical memory with a fixmap index.
 *
 * TLB entries for such buffers will not be flushed across
 * task switches.
 */
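
/*
 * Typical use (a sketch, not code from this header; FIX_FOO is a
 * hypothetical slot from the enum below, while set_fixmap() and
 * fix_to_virt() are provided by asm-generic/fixmap.h):
 *
 *	set_fixmap(FIX_FOO, phys_addr);
 *	void *va = (void *)fix_to_virt(FIX_FOO);
 */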
enum fixed_addresses {
	FIX_HOLE,
#ifdef CONFIG_PPC32
	/* reserve the top 128K for early debugging purposes */
	FIX_EARLY_DEBUG_TOP = FIX_HOLE,
	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
	FIX_KMAP_BEGIN,	/* reserved PTEs for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
#ifdef CONFIG_PPC_8xx
	/* For IMMR we need an aligned 512K area */
#define FIX_IMMR_SIZE	(512 * 1024 / PAGE_SIZE)
	FIX_IMMR_START,
	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
		       FIX_IMMR_SIZE,
#endif
#ifdef CONFIG_PPC_83xx
	/* For IMMR we need an aligned 2M area */
#define FIX_IMMR_SIZE	(SZ_2M / PAGE_SIZE)
	FIX_IMMR_START,
	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
		       FIX_IMMR_SIZE,
#endif
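	/*
	 * How the IMMR alignment trick above works (a sketch, assuming
	 * 4K pages and FIXADDR_TOP = 0xfffff000): fixmap virtual
	 * addresses grow downwards from FIXADDR_TOP, so rounding the
	 * index up with __ALIGN_MASK() and putting FIX_IMMR_BASE at
	 * index ALIGN - 1 + SIZE makes its virtual address naturally
	 * aligned. E.g. on 8xx, FIX_IMMR_SIZE = 128 slots (512K); if
	 * FIX_IMMR_START happened to be 32, the index would round up
	 * to 128, FIX_IMMR_BASE = 128 - 1 + 128 = 255, and
	 * __fix_to_virt(255) = 0xfffff000 - 255 * 0x1000 = 0xfff00000,
	 * which is 512K-aligned.
	 */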
	/* FIX_PCIE_MCFG, */
#endif /* CONFIG_PPC32 */
	__end_of_permanent_fixed_addresses,

#define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
#define FIX_BTMAPS_SLOTS	16
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)

	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
	__end_of_fixed_addresses
};
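
/*
 * Index-to-address mapping, from asm-generic/fixmap.h:
 *
 *	__fix_to_virt(idx) = FIXADDR_TOP - (idx << PAGE_SHIFT)
 *
 * so FIX_HOLE (index 0) maps to FIXADDR_TOP itself and each
 * subsequent index sits one page lower. The FIX_BTMAP_* slots back
 * early_ioremap(); note that FIX_BTMAP_END is the *lower* index and
 * hence the higher virtual address.
 */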

#define __FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)

#define FIXMAP_ALIGNED_SIZE	(ALIGN(FIXADDR_TOP, PGDIR_SIZE) - \
				 ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE))
#define FIXMAP_PTE_SIZE	(FIXMAP_ALIGNED_SIZE / PGDIR_SIZE * PTE_TABLE_SIZE)
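
/*
 * A worked sizing example (a sketch; real values depend on the MMU
 * configuration): with 4K pages and 4M PGDIR entries on classic
 * 32-bit, a fixmap that fits inside a single PGDIR-aligned region
 * yields FIXMAP_ALIGNED_SIZE = 4M and FIXMAP_PTE_SIZE =
 * PTE_TABLE_SIZE, i.e. one page table is enough to map the whole
 * fixmap at early boot; a fixmap straddling a 4M boundary would
 * need two.
 */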

#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
#define FIXMAP_PAGE_IO	PAGE_KERNEL_NCG

#include <asm-generic/fixmap.h>

static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	/* On PPC64, the whole fixmap must fit in the reserved FIXADDR_SIZE */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);

	/* Catch out-of-range indices at build time where possible */
	if (__builtin_constant_p(idx))
		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
	else if (WARN_ON(idx >= __end_of_fixed_addresses))
		return;

	map_kernel_page(__fix_to_virt(idx), phys, flags);
}

#define __early_set_fixmap	__set_fixmap

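/*
 * Example early-boot use (a sketch; uart_phys is a hypothetical
 * physical address, and the mapping is made before the regular
 * ioremap machinery is available):
 *
 *	__early_set_fixmap(FIX_EARLY_DEBUG_BASE, uart_phys,
 *			   FIXMAP_PAGE_IO);
 *	void __iomem *uart =
 *		(void __iomem *)fix_to_virt(FIX_EARLY_DEBUG_BASE);
 */
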
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_FIXMAP_H */