xref: /linux/arch/powerpc/include/asm/elf.h (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * ELF register definitions.
 */
#ifndef _ASM_POWERPC_ELF_H
#define _ASM_POWERPC_ELF_H

#include <linux/sched.h>	/* for task_struct */
#include <asm/page.h>
#include <asm/string.h>
#include <uapi/asm/elf.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x)	((x)->e_machine == EM_PPC)

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is raised to 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(is_32bit_task() ? 0x000400000UL : \
						   0x100000000UL)
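
/*
 * Worked values (illustrative note, derived from the constants above): PIE
 * binaries are based at 0x400000 (4MB) for 32-bit tasks and at
 * 0x100000000 (4GB) for 64-bit tasks.
 */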

#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)

/*
 * Our registers are always unsigned longs, whether we're a 32 bit
 * process or 64 bit, on either a 64 bit or 32 bit kernel.
 *
 * This macro relies on elf_regs[i] having the right type to truncate to,
 * either u32 or u64.  It defines the body of the elf_core_copy_regs
 * function, either the native one with elf_gregset_t elf_regs or
 * the 32-bit one with elf_gregset_t32 elf_regs.
 */
#define PPC_ELF_CORE_COPY_REGS(elf_regs, regs) \
	int i, nregs = min(sizeof(*regs) / sizeof(unsigned long), \
			   (size_t)ELF_NGREG);			  \
	for (i = 0; i < nregs; i++) \
		elf_regs[i] = ((unsigned long *) regs)[i]; \
	memset(&elf_regs[i], 0, (ELF_NGREG - i) * sizeof(elf_regs[0]))

/* Common routine for both 32-bit and 64-bit native processes */
static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
					  struct pt_regs *regs)
{
	PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
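
/*
 * Illustrative sketch (not part of this header): as the comment above
 * notes, the same macro body also suits a 32-bit dump path; a hypothetical
 * compat variant would simply instantiate it with the 32-bit register set
 * type:
 *
 *	static inline void ppc_elf_core_copy_regs_compat(elf_gregset_t32 elf_regs,
 *							  struct pt_regs *regs)
 *	{
 *		PPC_ELF_CORE_COPY_REGS(elf_regs, regs);
 *	}
 */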

/* ELF_HWCAP yields a mask that user programs can use to figure out what
   instruction set this cpu supports.  This could be done in userspace,
   but it's not easy, and we've already done it here.  */
# define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
# define ELF_HWCAP2	(cur_cpu_spec->cpu_user_features2)
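
/*
 * Illustrative userspace consumer (outside the kernel): both masks are
 * exported through the aux vector and are typically read with getauxval()
 * from <sys/auxv.h>, e.g.
 *
 *	unsigned long hwcap  = getauxval(AT_HWCAP);
 *	unsigned long hwcap2 = getauxval(AT_HWCAP2);
 *	if (hwcap2 & PPC_FEATURE2_ARCH_3_00)
 *		use_isa_3_0_paths();	(hypothetical helper; ISA v3.0 usable)
 */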

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo.  */

#define ELF_PLATFORM	(cur_cpu_spec->platform)

/* While ELF_PLATFORM indicates the ISA supported by the platform, it
 * may not accurately reflect the underlying behavior of the hardware
 * (as in the case of running in Power5+ compatibility mode on a
 * Power6 machine).  ELF_BASE_PLATFORM allows ld.so to load libraries
 * that are tuned for the real hardware.
 */
#define ELF_BASE_PLATFORM (powerpc_base_platform)
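
/*
 * Illustrative userspace view (sketch): both strings reach ld.so through
 * the aux vector as AT_PLATFORM and AT_BASE_PLATFORM, e.g.
 *
 *	const char *plat = (const char *)getauxval(AT_PLATFORM);
 *	const char *base = (const char *)getauxval(AT_BASE_PLATFORM);
 *
 * In the compatibility-mode case described above, plat might read
 * "power5+" while base reports the real "power6" hardware.
 */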

#ifdef __powerpc64__
# define ELF_PLAT_INIT(_r, load_addr)	do {	\
	_r->gpr[2] = load_addr; 		\
} while (0)
#endif /* __powerpc64__ */

#ifdef __powerpc64__
# define SET_PERSONALITY(ex)					\
do {								\
	if (((ex).e_flags & 0x3) == 2)				\
		set_thread_flag(TIF_ELF2ABI);			\
	else							\
		clear_thread_flag(TIF_ELF2ABI);			\
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
		set_thread_flag(TIF_32BIT);			\
	else							\
		clear_thread_flag(TIF_32BIT);			\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & (~PER_MASK)));	\
} while (0)
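
/*
 * Illustrative note: for 64-bit binaries the low two bits of e_flags carry
 * the ABI version, and the value 2 selects the ELFv2 ABI, which is why
 * SET_PERSONALITY() sets TIF_ELF2ABI above.  A userspace check of the same
 * field might look like (sketch):
 *
 *	Elf64_Ehdr ehdr;
 *	...read the file header...
 *	int is_elfv2 = (ehdr.e_flags & 0x3) == 2;
 */
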
/*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically. This
 * is only required to work around bugs in old 32-bit toolchains. Since
 * the 64-bit ABI has never had these issues, don't enable the workaround
 * even if we have an executable stack.
 */
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
		(exec_stk == EXSTACK_DEFAULT) : 0)
#else
# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
#endif /* __powerpc64__ */

extern int dcache_bsize;
extern int icache_bsize;
extern int ucache_bsize;

/* vDSO has arch_setup_additional_pages */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
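
/*
 * Illustrative userspace consumer (sketch): VDSO_AUX_ENT() is used below to
 * emit AT_SYSINFO_EHDR, which is how ld.so locates the vDSO image:
 *
 *	void *vdso_ehdr = (void *)getauxval(AT_SYSINFO_EHDR);
 */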

/* 1GB for 64-bit, 8MB for 32-bit */
#define STACK_RND_MASK (is_32bit_task() ? \
	(0x7ff >> (PAGE_SHIFT - 12)) : \
	(0x3ffff >> (PAGE_SHIFT - 12)))
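
/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): the 32-bit
 * mask covers 0x7ff pages, i.e. 2047 * 4K ~= 8MB of stack randomization,
 * and the 64-bit mask covers 0x3ffff pages ~= 1GB, matching the comment
 * above.  With 64K pages the masks are shifted right by 4 bits, so the
 * byte ranges stay the same.
 */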

#ifdef CONFIG_SPU_BASE
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
#define NT_SPU		1

#endif /* CONFIG_SPU_BASE */

#ifdef CONFIG_PPC64

#define get_cache_geometry(level) \
	(ppc64_caches.level.assoc << 16 | ppc64_caches.level.line_size)

#define ARCH_DLINFO_CACHE_GEOMETRY					\
	NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size);		\
	NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i));	\
	NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size);		\
	NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d));	\
	NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size);		\
	NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2));	\
	NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size);		\
	NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, get_cache_geometry(l3))
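
/*
 * The geometry word packs the associativity in the upper 16 bits and the
 * line size in bytes in the lower 16 bits.  Illustrative userspace decode
 * (sketch):
 *
 *	unsigned long geom  = getauxval(AT_L1D_CACHEGEOMETRY);
 *	unsigned int  assoc = geom >> 16;
 *	unsigned int  line  = geom & 0xffff;
 */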

#else
#define ARCH_DLINFO_CACHE_GEOMETRY
#endif

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16-byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 * - update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
 */
#define COMMON_ARCH_DLINFO						\
do {									\
	/* Handle glibc compatibility. */				\
	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
	/* Cache size items */						\
	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
	NEW_AUX_ENT(AT_UCACHEBSIZE, 0);					\
	VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
	ARCH_DLINFO_CACHE_GEOMETRY;					\
} while (0)

#define ARCH_DLINFO							\
do {									\
	COMMON_ARCH_DLINFO;						\
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size());		\
} while (0)

#define COMPAT_ARCH_DLINFO						\
do {									\
	COMMON_ARCH_DLINFO;						\
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size_compat());	\
} while (0)
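
/*
 * Illustrative userspace use of the AT_MINSIGSTKSZ entry (sketch): size an
 * alternate signal stack no smaller than the kernel's minimum signal frame:
 *
 *	unsigned long minsig = getauxval(AT_MINSIGSTKSZ);
 *	stack_t ss = { .ss_sp = buf, .ss_flags = 0,
 *		       .ss_size = minsig > SIGSTKSZ ? minsig : SIGSTKSZ };
 *	sigaltstack(&ss, NULL);
 */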

/* Relocate the kernel image to @final_address */
void relocate(unsigned long final_address);

struct func_desc {
	unsigned long addr;
	unsigned long toc;
	unsigned long env;
};
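
/*
 * Illustrative note: this layout matches an ELFv1 (big-endian ppc64)
 * function descriptor, where a function "pointer" really points at such a
 * descriptor rather than at code.  A sketch of resolving one:
 *
 *	struct func_desc *fd = (struct func_desc *)fn;
 *	unsigned long entry = fd->addr;		actual code address
 *	unsigned long toc   = fd->toc;		TOC value to load into r2
 */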

#endif /* _ASM_POWERPC_ELF_H */