/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 *
 * Derived from "include/asm-i386/elf.h"
 */

#ifndef __ASMS390_ELF_H
#define __ASMS390_ELF_H

/* s390 relocations defined by the ABIs */
#define R_390_NONE		0	/* No reloc. */
#define R_390_8			1	/* Direct 8 bit. */
#define R_390_12		2	/* Direct 12 bit. */
#define R_390_16		3	/* Direct 16 bit. */
#define R_390_32		4	/* Direct 32 bit. */
#define R_390_PC32		5	/* PC relative 32 bit. */
#define R_390_GOT12		6	/* 12 bit GOT offset. */
#define R_390_GOT32		7	/* 32 bit GOT offset. */
#define R_390_PLT32		8	/* 32 bit PC relative PLT address. */
#define R_390_COPY		9	/* Copy symbol at runtime. */
#define R_390_GLOB_DAT		10	/* Create GOT entry. */
#define R_390_JMP_SLOT		11	/* Create PLT entry. */
#define R_390_RELATIVE		12	/* Adjust by program base. */
#define R_390_GOTOFF32		13	/* 32 bit offset to GOT. */
#define R_390_GOTPC		14	/* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16		15	/* 16 bit GOT offset. */
#define R_390_PC16		16	/* PC relative 16 bit. */
#define R_390_PC16DBL		17	/* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL		18	/* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL		19	/* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL		20	/* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL		21	/* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64		22	/* Direct 64 bit. */
#define R_390_PC64		23	/* PC relative 64 bit. */
#define R_390_GOT64		24	/* 64 bit GOT offset. */
#define R_390_PLT64		25	/* 64 bit PC relative PLT address. */
#define R_390_GOTENT		26	/* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16		27	/* 16 bit offset to GOT. */
#define R_390_GOTOFF64		28	/* 64 bit offset to GOT. */
#define R_390_GOTPLT12		29	/* 12 bit offset to jump slot. */
#define R_390_GOTPLT16		30	/* 16 bit offset to jump slot. */
#define R_390_GOTPLT32		31	/* 32 bit offset to jump slot. */
#define R_390_GOTPLT64		32	/* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT		33	/* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16		34	/* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32		35	/* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64		36	/* 64 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD		37	/* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL	38	/* Tag for function call in general
					   dynamic TLS code. */
#define R_390_TLS_LDCALL	39	/* Tag for function call in local
					   dynamic TLS code. */
#define R_390_TLS_GD32		40	/* Direct 32 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GD64		41	/* Direct 64 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GOTIE12	42	/* 12 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE32	43	/* 32 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE64	44	/* 64 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_LDM32		45	/* Direct 32 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_LDM64		46	/* Direct 64 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_IE32		47	/* 32 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IE64		48	/* 64 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IEENT		49	/* 32 bit rel. offset to GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_LE32		50	/* 32 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LE64		51	/* 64 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LDO32		52	/* 32 bit offset relative to TLS
					   block. */
#define R_390_TLS_LDO64		53	/* 64 bit offset relative to TLS
					   block. */
#define R_390_TLS_DTPMOD	54	/* ID of module containing symbol. */
#define R_390_TLS_DTPOFF	55	/* Offset in TLS block. */
#define R_390_TLS_TPOFF		56	/* Negated offset in static TLS
					   block. */
#define R_390_20		57	/* Direct 20 bit. */
#define R_390_GOT20		58	/* 20 bit GOT offset. */
#define R_390_GOTPLT20		59	/* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20	60	/* 20 bit GOT offset for static TLS
					   block offset. */
/* Keep this the last entry. */
#define R_390_NUM		61

/*
 * HWCAP flags - for AT_HWCAP
 *
 * Bits 32-63 are reserved for use by libc.
 * Bit 31 is reserved and will be used by libc to determine if a second
 * argument is passed to IFUNC resolvers. This will be implemented when
 * there is a need for AT_HWCAP2.
 */
enum {
	HWCAP_NR_ESAN3		= 0,
	HWCAP_NR_ZARCH		= 1,
	HWCAP_NR_STFLE		= 2,
	HWCAP_NR_MSA		= 3,
	HWCAP_NR_LDISP		= 4,
	HWCAP_NR_EIMM		= 5,
	HWCAP_NR_DFP		= 6,
	HWCAP_NR_HPAGE		= 7,
	HWCAP_NR_ETF3EH		= 8,
	HWCAP_NR_HIGH_GPRS	= 9,
	HWCAP_NR_TE		= 10,
	HWCAP_NR_VXRS		= 11,
	HWCAP_NR_VXRS_BCD	= 12,
	HWCAP_NR_VXRS_EXT	= 13,
	HWCAP_NR_GS		= 14,
	HWCAP_NR_VXRS_EXT2	= 15,
	HWCAP_NR_VXRS_PDE	= 16,
	HWCAP_NR_SORT		= 17,
	HWCAP_NR_DFLT		= 18,
	HWCAP_NR_VXRS_PDE2	= 19,
	HWCAP_NR_NNPA		= 20,
	HWCAP_NR_PCI_MIO	= 21,
	HWCAP_NR_SIE		= 22,
	HWCAP_NR_MAX
};

/* Bits present in AT_HWCAP. */
#define HWCAP_ESAN3		BIT(HWCAP_NR_ESAN3)
#define HWCAP_ZARCH		BIT(HWCAP_NR_ZARCH)
#define HWCAP_STFLE		BIT(HWCAP_NR_STFLE)
#define HWCAP_MSA		BIT(HWCAP_NR_MSA)
#define HWCAP_LDISP		BIT(HWCAP_NR_LDISP)
#define HWCAP_EIMM		BIT(HWCAP_NR_EIMM)
#define HWCAP_DFP		BIT(HWCAP_NR_DFP)
#define HWCAP_HPAGE		BIT(HWCAP_NR_HPAGE)
#define HWCAP_ETF3EH		BIT(HWCAP_NR_ETF3EH)
#define HWCAP_HIGH_GPRS		BIT(HWCAP_NR_HIGH_GPRS)
#define HWCAP_TE		BIT(HWCAP_NR_TE)
#define HWCAP_VXRS		BIT(HWCAP_NR_VXRS)
#define HWCAP_VXRS_BCD		BIT(HWCAP_NR_VXRS_BCD)
#define HWCAP_VXRS_EXT		BIT(HWCAP_NR_VXRS_EXT)
#define HWCAP_GS		BIT(HWCAP_NR_GS)
#define HWCAP_VXRS_EXT2		BIT(HWCAP_NR_VXRS_EXT2)
#define HWCAP_VXRS_PDE		BIT(HWCAP_NR_VXRS_PDE)
#define HWCAP_SORT		BIT(HWCAP_NR_SORT)
#define HWCAP_DFLT		BIT(HWCAP_NR_DFLT)
#define HWCAP_VXRS_PDE2		BIT(HWCAP_NR_VXRS_PDE2)
#define HWCAP_NNPA		BIT(HWCAP_NR_NNPA)
#define HWCAP_PCI_MIO		BIT(HWCAP_NR_PCI_MIO)
#define HWCAP_SIE		BIT(HWCAP_NR_SIE)
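
/*
 * Usage note (illustrative only): userspace normally consumes these bits
 * through the auxiliary vector rather than through this header. A minimal
 * sketch, assuming glibc's getauxval(), might look like:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap = getauxval(AT_HWCAP);
 *
 *		if (hwcap & (1UL << 11))	// bit 11 == HWCAP_NR_VXRS
 *			printf("vector facility available\n");
 *		return 0;
 *	}
 *
 * glibc exposes the same bits under its own HWCAP_S390_* names, so the
 * numeric values above must stay in sync with the userspace ABI.
 */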

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2MSB
#define ELF_ARCH	EM_S390

/* s390 specific phdr types */
#define PT_S390_PGSTE	0x70000000

/*
 * ELF register definitions.
 */

#include <linux/compat.h>

#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/user.h>

typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;

typedef s390_fp_regs compat_elf_fpregset_t;
typedef s390_compat_regs compat_elf_gregset_t;

#include <linux/sched/mm.h>	/* for task_struct */
#include <asm/mmu_context.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_start_thread	start_thread31

struct arch_elf_state {
	int rc;
};

#define INIT_ARCH_ELF_STATE { .rc = 0 }

#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
#ifdef CONFIG_PGSTE
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	struct arch_elf_state *_state = state;			\
	if ((phdr)->p_type == PT_S390_PGSTE &&			\
	    !page_table_allocate_pgste &&			\
	    !test_thread_flag(TIF_PGSTE) &&			\
	    !current->mm->context.alloc_pgste) {		\
		set_thread_flag(TIF_PGSTE);			\
		set_pt_regs_flag(task_pt_regs(current),		\
				 PIF_EXECVE_PGSTE_RESTART);	\
		_state->rc = -EAGAIN;				\
	}							\
	_state->rc;						\
})
#else
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	(state)->rc;						\
})
#endif

/* For SVR4/S390 the function pointer to be registered with `atexit` is
   passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
	do { \
		_r->gprs[14] = 0; \
	} while (0)

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
   use of this is to invoke "./ld.so someprog" to test out a new version of
   the loader. We need to make sure that it is out of the way of the program
   that it will "exec", and that there is sufficient room for the brk. 64-bit
   tasks are aligned to 4GB. */
#define ELF_ET_DYN_BASE (is_compat_task() ? \
				(STACK_TOP / 3 * 2) : \
				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. */

extern unsigned long elf_hwcap;
#define ELF_HWCAP (elf_hwcap)

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization. This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we only have optimizations for specific machine
   generations, but that could change... */

#define ELF_PLATFORM_SIZE 8
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)

#ifndef CONFIG_COMPAT
#define SET_PERSONALITY(ex)					\
do {								\
	set_personality(PER_LINUX |				\
		(current->personality & (~PER_MASK)));		\
	current->thread.sys_call_table = sys_call_table;	\
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex)					\
do {								\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & ~PER_MASK));	\
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
		set_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			sys_call_table_emu;			\
	} else {						\
		clear_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			sys_call_table;				\
	}							\
} while (0)
#endif /* CONFIG_COMPAT */

/*
 * Cache aliasing on the latest machines calls for a mapping granularity
 * of 512KB for the anonymous mapping base. For 64-bit processes use a
 * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
 * the virtual address space is limited, use no alignment and limit the
 * randomization to 8MB.
 * For the additional randomization of the program break use 32MB for
 * 64-bit and 8MB for 31-bit.
 */
#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK	MMAP_RND_MASK
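
/*
 * Quick sanity check of the masks above (illustrative, assuming the usual
 * 4KB page size, i.e. PAGE_SHIFT == 12, and that the randomization code
 * applies these masks to page numbers before shifting by PAGE_SHIFT):
 *
 *	MMAP_RND_MASK   64-bit: 0x3ff80 pages -> ~1GB of randomization,
 *				low 0x7f page bits clear -> 512KB steps
 *	MMAP_RND_MASK   31-bit: 0x7ff pages   -> ~8MB of randomization
 *	MMAP_ALIGN_MASK 64-bit: 0x7f pages    -> 512KB alignment
 *	BRK_RND_MASK    64-bit: 0x1fff pages  -> ~32MB of randomization
 *	BRK_RND_MASK    31-bit: 0x7ff pages   -> ~8MB of randomization
 *
 * which matches the figures quoted in the comment above.
 */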

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO							\
do {									\
	NEW_AUX_ENT(AT_SYSINFO_EHDR,					\
		    (unsigned long)current->mm->context.vdso_base);	\
} while (0)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);

#endif