// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "Modules: " fmt

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
#include <asm/text-patching.h>

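/*
 * In the terms of the AArch64 ELF specification: RELOC_OP_ABS computes
 * S + A, RELOC_OP_PREL computes S + A - P, and RELOC_OP_PAGE computes
 * Page(S + A) - Page(P), where Page(x) clears the bottom 12 bits of x.
 */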
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

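/*
 * Relocations may be applied twice. At load time, while the module is
 * still in its writable staging area (MODULE_STATE_UNFORMED), a plain
 * store suffices; for late relocations (e.g. livepatch), the module
 * text is already mapped read-only and must be updated through the
 * text-patching machinery instead.
 */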
#define WRITE_PLACE(place, val, mod) do {				\
	__typeof__(val) __val = (val);					\
									\
	if (mod->state == MODULE_STATE_UNFORMED)			\
		*(place) = __val;					\
	else								\
		aarch64_insn_copy(place, &(__val), sizeof(*place));	\
} while (0)

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len,
		      struct module *me)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

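	/*
	 * The truncated value is written out before the range check: for
	 * relocation types where the caller skips overflow checking, a
	 * -ERANGE result below is simply ignored.
	 */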
	switch (len) {
	case 16:
		WRITE_PLACE((s16 *)place, sval, me);
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		WRITE_PLACE((s32 *)place, sval, me);
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		WRITE_PLACE((s64 *)place, sval, me);
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

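/*
 * AARCH64_INSN_IMM_MOVNZ flips the instruction between MOVZ and MOVN
 * depending on the sign of the value, as required by the signed MOVW
 * relocations; AARCH64_INSN_IMM_MOVKZ leaves the opcode untouched and
 * only patches the immediate field.
 */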
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type,
			   struct module *me)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	WRITE_PLACE(place, cpu_to_le32(insn), me);

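	/* Bits that didn't fit in the 16-bit immediate field signal overflow. */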
	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type,
			  struct module *me)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	WRITE_PLACE(place, cpu_to_le32(insn), me);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value: after the arithmetic shift above,
	 * sval must be either 0 or -1, so the unsigned comparison below
	 * accepts exactly those two values.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val, struct module *me)
{
	u32 insn;

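	/*
	 * An ADRP instruction in the last two instruction slots of a
	 * 4 KiB page is subject to Cortex-A53 erratum 843419; rewrite it
	 * as an ADR, or branch to a veneer, rather than leaving it there.
	 */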
	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR, me);

	/* Patch ADRP to ADR if it is in range. */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR, me)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* Out of range for ADR -> emit a veneer. */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	WRITE_PLACE(place, cpu_to_le32(insn), me);
	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me);
			break;

		/*
		 * MOVW instruction relocations. Each group patches one
		 * 16-bit slice of the value: G0 covers bits [15:0], G1
		 * bits [31:16], G2 bits [47:32] and G3 bits [63:48]. The
		 * _NC ("no check") variants skip overflow checking since
		 * higher groups carry the remaining bits.
		 */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ, me);
			break;

		/*
		 * Immediate instruction relocations: bits
		 * [lsb + len - 1:lsb] of the computed value are encoded
		 * into the instruction's immediate field.
		 */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19, me);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR, me);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val, me);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12, me);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12, me);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12, me);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12, me);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12, me);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14, me);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19, me);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26, me);
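			/*
			 * A -ERANGE result means the branch target is out
			 * of the +/-128 MiB range of a direct branch, so
			 * route it through a PLT entry instead.
			 */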
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26, me);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

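/*
 * With dynamic ftrace, a module may load out of direct-branch range of
 * the ftrace entry code, so a trampoline is reserved in the module's
 * .text.ftrace_trampoline section for its call sites to branch through.
 */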
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	int ret;

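	/* Apply CPU-feature-dependent alternative instruction sequences. */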
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

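	/*
	 * With dynamic shadow call stacks, the module's PACIASP/AUTIASP
	 * instructions are rewritten into shadow call stack pushes and
	 * pops, guided by the unwind tables in .init.eh_frame.
	 */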
	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s) {
			ret = __pi_scs_patch((void *)s->sh_addr, s->sh_size);
			if (ret)
				pr_err("module %s: error occurred during dynamic SCS patching (%d)\n",
				       me->name, ret);
		}
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}