// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
						    (PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						    (PLT_ENT_STRIDE - 8))
#endif
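
/*
 * Each PLT entry is a single PC-relative load of a literal that sits
 * PLT_ENT_STRIDE bytes after it: 0xe59ff000 is 'ldr pc, [pc, #imm]' in
 * ARM, 0xf8dff000 is 'ldr.w pc, [pc, #imm]' in Thumb2. The immediate
 * corrects for the PC read bias (+8 on ARM, +4 on Thumb2), so the
 * instruction at plt->ldr[i] always branches via the literal stored at
 * plt->lit[i] (see struct plt_entries in asm/module.h).
 */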

static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
	FTRACE_ADDR,
	MCOUNT_ADDR,
#endif
};
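
/*
 * With dynamic ftrace, the mcount call sites are patched at runtime,
 * long after the module has been relocated, so the veneers those calls
 * may need cannot be allocated on demand. Instead, every module gets
 * fixed, preallocated PLT slots for the ftrace trampoline addresses.
 */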

static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
	int i;

	if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
		return;
	pltsec->plt_count = ARRAY_SIZE(fixed_plts);

	for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
		plt->ldr[i] = PLT_ENT_LDR;

	BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
	memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}

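/*
 * Return the address of a PLT veneer (the ldr slot) that branches to
 * 'val', allocating a new entry if no reusable one exists. Called from
 * the relocation code when a B/BL/B.W target is out of branch range.
 */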
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
						&mod->arch.core : &mod->arch.init;
	struct plt_entries *plt;
	int idx;

	/* cache the address, ELF header is available only during module load */
	if (!pltsec->plt_ent)
		pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
	plt = pltsec->plt_ent;

	prealloc_fixed(pltsec, plt);

	for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

	idx = 0;
	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}
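
/*
 * A sketch of the caller's side, loosely based on apply_relocate() in
 * arch/arm/kernel/module.c (simplified, not the verbatim code): when
 * the relocated branch offset no longer fits in the scaled 24-bit
 * immediate, the branch is redirected through a veneer instead:
 *
 *	if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)
 *		offset = get_module_plt(module, loc, offset + loc + 8)
 *			 - loc - 8;
 */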
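/*
 * Three-way compare yielding -1, 0 or 1, memcmp()-style. Written with
 * comparisons rather than a subtraction so it stays correct for the
 * full range of the unsigned 32-bit r_info fields.
 */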
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
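	/*
	 * Worked example for the ARM case below: a zero-addend BL stores
	 * the addend -8 (the PC bias) in its 24-bit immediate, scaled down
	 * by two bits, so imm24 = -8 >> 2 = -2 = 0xfffffe. The Thumb2 masks
	 * check the equivalent bit pattern of a -4 addend spread across the
	 * S/J1/J2/imm fields of the two instruction halfwords.
	 */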
	switch (ELF32_R_TYPE(rel->r_info)) {
	u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}
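/*
 * Runs during module load, before any relocations are applied: size the
 * .plt and .init.plt sections (declared as empty placeholders by the
 * module linker script) so the worst-case number of veneers will fit.
 */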
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = ARRAY_SIZE(fixed_plts);
	unsigned long init_plts = ARRAY_SIZE(fixed_plts);
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we expand the .text section for core module code
	 * and for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;
	mod->arch.core.plt_ent = NULL;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;
	mod->arch.init.plt_ent = NULL;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}
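/*
 * Report whether 'loc' lies inside one of the PLT regions of the module
 * that owns it. The unsigned subtraction doubles as a range check: if
 * 'loc' is below plt_ent, the difference wraps around and exceeds the
 * region size. Preemption is disabled so the module cannot be freed
 * while we dereference its arch-specific fields.
 */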
bool in_module_plt(unsigned long loc)
{
	struct module *mod;
	bool ret;

	preempt_disable();
	mod = __module_text_address(loc);
	ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
		      loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
	preempt_enable();

	return ret;
}