xref: /linux/arch/riscv/kernel/module.c (revision 4d7b321a9ce0782a953874ec69acc2b12b9cb2cd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Copyright (C) 2017 Zihao Yu
5  */
6 
7 #include <linux/elf.h>
8 #include <linux/err.h>
9 #include <linux/errno.h>
10 #include <linux/hashtable.h>
11 #include <linux/kernel.h>
12 #include <linux/log2.h>
13 #include <linux/moduleloader.h>
14 #include <linux/vmalloc.h>
15 #include <linux/sizes.h>
16 #include <linux/pgtable.h>
17 #include <linux/execmem.h>
18 #include <asm/alternative.h>
19 #include <asm/sections.h>
20 
/*
 * Bookkeeping node: links a hashtable bucket into the list of buckets that
 * actually contain relocations, so teardown only walks non-empty buckets.
 */
struct used_bucket {
	struct list_head head;
	struct hlist_head *bucket;
};
25 
/*
 * One node per distinct relocation target address; hangs off a hash bucket
 * and owns the list of relocation entries that apply to @location.
 */
struct relocation_head {
	struct hlist_node node;
	struct list_head *rel_entry;	/* list of struct relocation_entry */
	void *location;			/* address being relocated */
};
31 
/* A single accumulated relocation (value + R_RISCV_* type) for a location. */
struct relocation_entry {
	struct list_head head;
	Elf_Addr value;
	unsigned int type;
};
37 
/*
 * Per-type handlers. reloc_handler applies one relocation. The optional
 * accumulate_handler stores the combined value for ADD/SUB/SET/ULEB128
 * relocations, which must be summed into a buffer before being written out.
 */
struct relocation_handlers {
	int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
	int (*accumulate_handler)(struct module *me, void *location,
				  long buffer);
};
43 
/*
 * The auipc+jalr instruction pair can reach any PC-relative offset
 * in the range [-2^31 - 2^11, 2^31 - 2^11)
 */
static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
{
#ifdef CONFIG_32BIT
	/* On rv32 every offset is reachable. */
	return true;
#else
	const ptrdiff_t lo = -(1L << 31) - (1L << 11);
	const ptrdiff_t hi = (1L << 31) - (1L << 11);

	return val >= lo && val < hi;
#endif
}
56 
57 static int riscv_insn_rmw(void *location, u32 keep, u32 set)
58 {
59 	__le16 *parcel = location;
60 	u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
61 
62 	insn &= keep;
63 	insn |= set;
64 
65 	parcel[0] = cpu_to_le16(insn);
66 	parcel[1] = cpu_to_le16(insn >> 16);
67 	return 0;
68 }
69 
70 static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
71 {
72 	__le16 *parcel = location;
73 	u16 insn = le16_to_cpu(*parcel);
74 
75 	insn &= keep;
76 	insn |= set;
77 
78 	*parcel = cpu_to_le16(insn);
79 	return 0;
80 }
81 
82 static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
83 {
84 	if (v != (u32)v) {
85 		pr_err("%s: value %016llx out of range for 32-bit field\n",
86 		       me->name, (long long)v);
87 		return -EINVAL;
88 	}
89 	*(u32 *)location = v;
90 	return 0;
91 }
92 
93 static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
94 {
95 	*(u64 *)location = v;
96 	return 0;
97 }
98 
99 static int apply_r_riscv_branch_rela(struct module *me, void *location,
100 				     Elf_Addr v)
101 {
102 	ptrdiff_t offset = (void *)v - location;
103 	u32 imm12 = (offset & 0x1000) << (31 - 12);
104 	u32 imm11 = (offset & 0x800) >> (11 - 7);
105 	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
106 	u32 imm4_1 = (offset & 0x1e) << (11 - 4);
107 
108 	return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
109 }
110 
111 static int apply_r_riscv_jal_rela(struct module *me, void *location,
112 				  Elf_Addr v)
113 {
114 	ptrdiff_t offset = (void *)v - location;
115 	u32 imm20 = (offset & 0x100000) << (31 - 20);
116 	u32 imm19_12 = (offset & 0xff000);
117 	u32 imm11 = (offset & 0x800) << (20 - 11);
118 	u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
119 
120 	return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
121 }
122 
/* R_RISCV_RVC_BRANCH: patch a compressed CB-format branch (c.beqz/c.bnez). */
static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	/* Scatter the offset bits into the CB-format immediate fields. */
	u16 imm8 = (offset & 0x100) << (12 - 8);
	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4_3 = (offset & 0x18) << (12 - 5);
	u16 imm2_1 = (offset & 0x6) << (12 - 10);

	/* 0xe383 keeps funct3, rs1' and the opcode; the rest is immediate. */
	return riscv_insn_rvc_rmw(location, 0xe383,
			imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}
136 
/* R_RISCV_RVC_JUMP: patch a compressed CJ-format jump (c.j). */
static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	/* Scatter the offset bits into the CJ-format immediate fields. */
	u16 imm11 = (offset & 0x800) << (12 - 11);
	u16 imm10 = (offset & 0x400) >> (10 - 8);
	u16 imm9_8 = (offset & 0x300) << (12 - 11);
	u16 imm7 = (offset & 0x80) >> (7 - 6);
	u16 imm6 = (offset & 0x40) << (12 - 11);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4 = (offset & 0x10) << (12 - 5);
	u16 imm3_1 = (offset & 0xe) << (12 - 10);

	/* 0xe003 keeps funct3 and the opcode; the rest is the immediate. */
	return riscv_insn_rvc_rmw(location, 0xe003,
			imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}
153 
154 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
155 					 Elf_Addr v)
156 {
157 	ptrdiff_t offset = (void *)v - location;
158 
159 	if (!riscv_insn_valid_32bit_offset(offset)) {
160 		pr_err(
161 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
162 		  me->name, (long long)v, location);
163 		return -EINVAL;
164 	}
165 
166 	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
167 }
168 
169 static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
170 					   Elf_Addr v)
171 {
172 	/*
173 	 * v is the lo12 value to fill. It is calculated before calling this
174 	 * handler.
175 	 */
176 	return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
177 }
178 
179 static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
180 					   Elf_Addr v)
181 {
182 	/*
183 	 * v is the lo12 value to fill. It is calculated before calling this
184 	 * handler.
185 	 */
186 	u32 imm11_5 = (v & 0xfe0) << (31 - 11);
187 	u32 imm4_0 = (v & 0x1f) << (11 - 4);
188 
189 	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
190 }
191 
192 static int apply_r_riscv_hi20_rela(struct module *me, void *location,
193 				   Elf_Addr v)
194 {
195 	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
196 		pr_err(
197 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
198 		  me->name, (long long)v, location);
199 		return -EINVAL;
200 	}
201 
202 	return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
203 }
204 
205 static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
206 				     Elf_Addr v)
207 {
208 	/* Skip medlow checking because of filtering by HI20 already */
209 	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
210 	s32 lo12 = ((s32)v - hi20);
211 
212 	return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
213 }
214 
215 static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
216 				     Elf_Addr v)
217 {
218 	/* Skip medlow checking because of filtering by HI20 already */
219 	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
220 	s32 lo12 = ((s32)v - hi20);
221 	u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
222 	u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
223 
224 	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
225 }
226 
227 static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
228 				       Elf_Addr v)
229 {
230 	ptrdiff_t offset = (void *)v - location;
231 
232 	/* Always emit the got entry */
233 	if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
234 		offset = (void *)module_emit_got_entry(me, v) - location;
235 	} else {
236 		pr_err(
237 		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
238 		  me->name, (long long)v, location);
239 		return -EINVAL;
240 	}
241 
242 	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
243 }
244 
245 static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
246 				       Elf_Addr v)
247 {
248 	ptrdiff_t offset = (void *)v - location;
249 	u32 hi20, lo12;
250 
251 	if (!riscv_insn_valid_32bit_offset(offset)) {
252 		/* Only emit the plt entry if offset over 32-bit range */
253 		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
254 			offset = (void *)module_emit_plt_entry(me, v) - location;
255 		} else {
256 			pr_err(
257 			  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
258 			  me->name, (long long)v, location);
259 			return -EINVAL;
260 		}
261 	}
262 
263 	hi20 = (offset + 0x800) & 0xfffff000;
264 	lo12 = (offset - hi20) & 0xfff;
265 	riscv_insn_rmw(location, 0xfff, hi20);
266 	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
267 }
268 
269 static int apply_r_riscv_call_rela(struct module *me, void *location,
270 				   Elf_Addr v)
271 {
272 	ptrdiff_t offset = (void *)v - location;
273 	u32 hi20, lo12;
274 
275 	if (!riscv_insn_valid_32bit_offset(offset)) {
276 		pr_err(
277 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
278 		  me->name, (long long)v, location);
279 		return -EINVAL;
280 	}
281 
282 	hi20 = (offset + 0x800) & 0xfffff000;
283 	lo12 = (offset - hi20) & 0xfff;
284 	riscv_insn_rmw(location, 0xfff, hi20);
285 	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
286 }
287 
288 static int apply_r_riscv_relax_rela(struct module *me, void *location,
289 				    Elf_Addr v)
290 {
291 	return 0;
292 }
293 
294 static int apply_r_riscv_align_rela(struct module *me, void *location,
295 				    Elf_Addr v)
296 {
297 	pr_err(
298 	  "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
299 	  me->name, location);
300 	return -EINVAL;
301 }
302 
303 static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
304 {
305 	*(u8 *)location += (u8)v;
306 	return 0;
307 }
308 
309 static int apply_r_riscv_add16_rela(struct module *me, void *location,
310 				    Elf_Addr v)
311 {
312 	*(u16 *)location += (u16)v;
313 	return 0;
314 }
315 
316 static int apply_r_riscv_add32_rela(struct module *me, void *location,
317 				    Elf_Addr v)
318 {
319 	*(u32 *)location += (u32)v;
320 	return 0;
321 }
322 
323 static int apply_r_riscv_add64_rela(struct module *me, void *location,
324 				    Elf_Addr v)
325 {
326 	*(u64 *)location += (u64)v;
327 	return 0;
328 }
329 
330 static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
331 {
332 	*(u8 *)location -= (u8)v;
333 	return 0;
334 }
335 
336 static int apply_r_riscv_sub16_rela(struct module *me, void *location,
337 				    Elf_Addr v)
338 {
339 	*(u16 *)location -= (u16)v;
340 	return 0;
341 }
342 
343 static int apply_r_riscv_sub32_rela(struct module *me, void *location,
344 				    Elf_Addr v)
345 {
346 	*(u32 *)location -= (u32)v;
347 	return 0;
348 }
349 
350 static int apply_r_riscv_sub64_rela(struct module *me, void *location,
351 				    Elf_Addr v)
352 {
353 	*(u64 *)location -= (u64)v;
354 	return 0;
355 }
356 
/* Shared stub for dynamic-linker relocation types (RELATIVE, COPY, ...). */
static int dynamic_linking_not_supported(struct module *me, void *location,
					 Elf_Addr v)
{
	pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
	       me->name, location);
	return -EINVAL;
}
364 
/* Shared stub for thread-local-storage relocation types (TPREL, GD, ...). */
static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
{
	pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
	       me->name, location);
	return -EINVAL;
}
371 
372 static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
373 {
374 	u8 *byte = location;
375 	u8 value = v;
376 
377 	*byte = (*byte - (value & 0x3f)) & 0x3f;
378 	return 0;
379 }
380 
381 static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
382 {
383 	u8 *byte = location;
384 	u8 value = v;
385 
386 	*byte = (*byte & 0xc0) | (value & 0x3f);
387 	return 0;
388 }
389 
390 static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
391 {
392 	*(u8 *)location = (u8)v;
393 	return 0;
394 }
395 
396 static int apply_r_riscv_set16_rela(struct module *me, void *location,
397 				    Elf_Addr v)
398 {
399 	*(u16 *)location = (u16)v;
400 	return 0;
401 }
402 
403 static int apply_r_riscv_set32_rela(struct module *me, void *location,
404 				    Elf_Addr v)
405 {
406 	*(u32 *)location = (u32)v;
407 	return 0;
408 }
409 
410 static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
411 				       Elf_Addr v)
412 {
413 	*(u32 *)location = v - (uintptr_t)location;
414 	return 0;
415 }
416 
417 static int apply_r_riscv_plt32_rela(struct module *me, void *location,
418 				    Elf_Addr v)
419 {
420 	ptrdiff_t offset = (void *)v - location;
421 
422 	if (!riscv_insn_valid_32bit_offset(offset)) {
423 		/* Only emit the plt entry if offset over 32-bit range */
424 		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
425 			offset = (void *)module_emit_plt_entry(me, v) - location;
426 		} else {
427 			pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
428 			       me->name, (long long)v, location);
429 			return -EINVAL;
430 		}
431 	}
432 
433 	*(u32 *)location = (u32)offset;
434 	return 0;
435 }
436 
437 static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
438 {
439 	*(long *)location = v;
440 	return 0;
441 }
442 
443 static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
444 {
445 	*(long *)location -= v;
446 	return 0;
447 }
448 
449 static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
450 {
451 	u8 *byte = location;
452 	u8 value = buffer;
453 
454 	if (buffer > 0x3f) {
455 		pr_err("%s: value %ld out of range for 6-bit relocation.\n",
456 		       me->name, buffer);
457 		return -EINVAL;
458 	}
459 
460 	*byte = (*byte & 0xc0) | (value & 0x3f);
461 	return 0;
462 }
463 
464 static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
465 {
466 	if (buffer > U8_MAX) {
467 		pr_err("%s: value %ld out of range for 8-bit relocation.\n",
468 		       me->name, buffer);
469 		return -EINVAL;
470 	}
471 	*(u8 *)location = (u8)buffer;
472 	return 0;
473 }
474 
475 static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
476 {
477 	if (buffer > U16_MAX) {
478 		pr_err("%s: value %ld out of range for 16-bit relocation.\n",
479 		       me->name, buffer);
480 		return -EINVAL;
481 	}
482 	*(u16 *)location = (u16)buffer;
483 	return 0;
484 }
485 
486 static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
487 {
488 	if (buffer > U32_MAX) {
489 		pr_err("%s: value %ld out of range for 32-bit relocation.\n",
490 		       me->name, buffer);
491 		return -EINVAL;
492 	}
493 	*(u32 *)location = (u32)buffer;
494 	return 0;
495 }
496 
497 static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
498 {
499 	*(u64 *)location = (u64)buffer;
500 	return 0;
501 }
502 
503 static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
504 {
505 	/*
506 	 * ULEB128 is a variable length encoding. Encode the buffer into
507 	 * the ULEB128 data format.
508 	 */
509 	u8 *p = location;
510 
511 	while (buffer != 0) {
512 		u8 value = buffer & 0x7f;
513 
514 		buffer >>= 7;
515 		value |= (!!buffer) << 7;
516 
517 		*p++ = value;
518 	}
519 	return 0;
520 }
521 
/*
 * Relocations defined in the riscv-elf-psabi-doc.
 * This handles static linking only.
 *
 * Indexed by R_RISCV_* type number; a hole (no reloc_handler) means the
 * type is unknown and apply_relocate_add() rejects it. Entries with an
 * accumulate_handler are summed per-location before being written back.
 */
static const struct relocation_handlers reloc_handlers[] = {
	[R_RISCV_32]		= { .reloc_handler = apply_r_riscv_32_rela },
	[R_RISCV_64]		= { .reloc_handler = apply_r_riscv_64_rela },
	[R_RISCV_RELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_COPY]		= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_JUMP_SLOT]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	/* 12-15 undefined */
	[R_RISCV_BRANCH]	= { .reloc_handler = apply_r_riscv_branch_rela },
	[R_RISCV_JAL]		= { .reloc_handler = apply_r_riscv_jal_rela },
	[R_RISCV_CALL]		= { .reloc_handler = apply_r_riscv_call_rela },
	[R_RISCV_CALL_PLT]	= { .reloc_handler = apply_r_riscv_call_plt_rela },
	[R_RISCV_GOT_HI20]	= { .reloc_handler = apply_r_riscv_got_hi20_rela },
	[R_RISCV_TLS_GOT_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TLS_GD_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_PCREL_HI20]	= { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
	[R_RISCV_PCREL_LO12_I]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
	[R_RISCV_PCREL_LO12_S]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
	[R_RISCV_HI20]		= { .reloc_handler = apply_r_riscv_hi20_rela },
	[R_RISCV_LO12_I]	= { .reloc_handler = apply_r_riscv_lo12_i_rela },
	[R_RISCV_LO12_S]	= { .reloc_handler = apply_r_riscv_lo12_s_rela },
	[R_RISCV_TPREL_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_I]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_S]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_ADD]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_ADD8]		= { .reloc_handler = apply_r_riscv_add8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_ADD16]		= { .reloc_handler = apply_r_riscv_add16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_ADD32]		= { .reloc_handler = apply_r_riscv_add32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_ADD64]		= { .reloc_handler = apply_r_riscv_add64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	[R_RISCV_SUB8]		= { .reloc_handler = apply_r_riscv_sub8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SUB16]		= { .reloc_handler = apply_r_riscv_sub16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SUB32]		= { .reloc_handler = apply_r_riscv_sub32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_SUB64]		= { .reloc_handler = apply_r_riscv_sub64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	/* 41-42 reserved for future standard use */
	[R_RISCV_ALIGN]		= { .reloc_handler = apply_r_riscv_align_rela },
	[R_RISCV_RVC_BRANCH]	= { .reloc_handler = apply_r_riscv_rvc_branch_rela },
	[R_RISCV_RVC_JUMP]	= { .reloc_handler = apply_r_riscv_rvc_jump_rela },
	/* 46-50 reserved for future standard use */
	[R_RISCV_RELAX]		= { .reloc_handler = apply_r_riscv_relax_rela },
	[R_RISCV_SUB6]		= { .reloc_handler = apply_r_riscv_sub6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET6]		= { .reloc_handler = apply_r_riscv_set6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET8]		= { .reloc_handler = apply_r_riscv_set8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SET16]		= { .reloc_handler = apply_r_riscv_set16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SET32]		= { .reloc_handler = apply_r_riscv_set32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_32_PCREL]	= { .reloc_handler = apply_r_riscv_32_pcrel_rela },
	[R_RISCV_IRELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_PLT32]		= { .reloc_handler = apply_r_riscv_plt32_rela },
	[R_RISCV_SET_ULEB128]	= { .reloc_handler = apply_r_riscv_set_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	[R_RISCV_SUB_ULEB128]	= { .reloc_handler = apply_r_riscv_sub_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	/* 62-191 reserved for future standard use */
	/* 192-255 nonstandard ABI extensions  */
};
598 
static void
process_accumulated_relocations(struct module *me,
				struct hlist_head **relocation_hashtable,
				struct list_head *used_buckets_list)
{
	/*
	 * Only ADD/SUB/SET/ULEB128 should end up here.
	 *
	 * Each bucket may have more than one relocation location. All
	 * relocations for a location are stored in a list in a bucket.
	 *
	 * Relocations are applied to a temp variable before being stored to the
	 * provided location to check for overflow. This also allows ULEB128 to
	 * properly decide how many entries are needed before storing to
	 * location. The final value is stored into location using the handler
	 * for the last relocation to an address.
	 *
	 * Three layers of indexing:
	 *	- Each of the buckets in use
	 *	- Groups of relocations in each bucket by location address
	 *	- Each relocation entry for a location address
	 */
	struct used_bucket *bucket_iter;
	struct used_bucket *bucket_iter_tmp;
	struct relocation_head *rel_head_iter;
	struct hlist_node *rel_head_iter_tmp;
	struct relocation_entry *rel_entry_iter;
	struct relocation_entry *rel_entry_iter_tmp;
	int curr_type;
	void *location;
	long buffer;

	/* _safe iterators throughout: every node is freed as it is visited. */
	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
				 used_buckets_list, head) {
		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
					  bucket_iter->bucket, node) {
			buffer = 0;
			location = rel_head_iter->location;
			list_for_each_entry_safe(rel_entry_iter,
						 rel_entry_iter_tmp,
						 rel_head_iter->rel_entry,
						 head) {
				/* Apply each relocation into the temp buffer. */
				curr_type = rel_entry_iter->type;
				reloc_handlers[curr_type].reloc_handler(
					me, &buffer, rel_entry_iter->value);
				kfree(rel_entry_iter);
			}
			/*
			 * curr_type is the type of the last entry; every
			 * rel_head is created with at least one entry (see
			 * add_relocation_to_accumulate), so it is always
			 * initialized before this call.
			 */
			reloc_handlers[curr_type].accumulate_handler(
				me, location, buffer);
			kfree(rel_head_iter);
		}
		kfree(bucket_iter);
	}

	/*
	 * NOTE(review): rel_head_iter->rel_entry (the separately kmalloc'ed
	 * list_head) does not appear to be freed here — possible leak;
	 * confirm against the allocation in add_relocation_to_accumulate.
	 */
	kfree(*relocation_hashtable);
}
655 
/*
 * Queue one ADD/SUB/SET/ULEB128 relocation for later accumulation.
 *
 * Relocations are grouped by target @location in a hashtable; all entries
 * for a location are summed and written back together by
 * process_accumulated_relocations(). Returns 0 or -ENOMEM.
 */
static int add_relocation_to_accumulate(struct module *me, int type,
					void *location,
					unsigned int hashtable_bits, Elf_Addr v,
					struct hlist_head *relocation_hashtable,
					struct list_head *used_buckets_list)
{
	struct relocation_entry *entry;
	struct relocation_head *rel_head;
	struct hlist_head *current_head;
	struct used_bucket *bucket;
	unsigned long hash;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->head);
	entry->type = type;
	entry->value = v;

	hash = hash_min((uintptr_t)location, hashtable_bits);

	current_head = &relocation_hashtable[hash];

	/*
	 * Search for the relocation_head for the relocations that happen at the
	 * provided location
	 */
	bool found = false;
	struct relocation_head *rel_head_iter;

	hlist_for_each_entry(rel_head_iter, current_head, node) {
		if (rel_head_iter->location == location) {
			found = true;
			rel_head = rel_head_iter;
			break;
		}
	}

	/*
	 * If there has not yet been any relocations at the provided location,
	 * create a relocation_head for that location and populate it with this
	 * relocation_entry.
	 */
	if (!found) {
		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);

		if (!rel_head) {
			kfree(entry);
			return -ENOMEM;
		}

		rel_head->rel_entry =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);

		if (!rel_head->rel_entry) {
			kfree(entry);
			kfree(rel_head);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(rel_head->rel_entry);
		rel_head->location = location;
		INIT_HLIST_NODE(&rel_head->node);
		/*
		 * First use of this bucket: record it so teardown only has
		 * to walk buckets that actually hold relocations.
		 */
		if (!current_head->first) {
			bucket =
				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);

			if (!bucket) {
				kfree(entry);
				kfree(rel_head->rel_entry);
				kfree(rel_head);
				return -ENOMEM;
			}

			INIT_LIST_HEAD(&bucket->head);
			bucket->bucket = current_head;
			list_add(&bucket->head, used_buckets_list);
		}
		hlist_add_head(&rel_head->node, current_head);
	}

	/* Add relocation to head of discovered rel_head */
	list_add_tail(&entry->head, rel_head->rel_entry);

	return 0;
}
744 
/*
 * Allocate and initialize the per-call relocation hashtable, sized for
 * roughly 80% load factor. Returns the number of hash bits; on allocation
 * failure *relocation_hashtable is NULL (callers must check the pointer,
 * since 0 bits is itself a valid result for a 1-bucket table).
 */
static unsigned int
initialize_relocation_hashtable(unsigned int num_relocations,
				struct hlist_head **relocation_hashtable)
{
	/* Can safely assume that bits is not greater than sizeof(long) */
	/*
	 * NOTE(review): roundup_pow_of_two(0) is undefined; confirm callers
	 * never pass num_relocations == 0 (an empty .rela section would).
	 */
	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
	/*
	 * When hashtable_size == 1, hashtable_bits == 0.
	 * This is valid because the hashing algorithm returns 0 in this case.
	 */
	unsigned int hashtable_bits = ilog2(hashtable_size);

	/*
	 * Double size of hashtable if num_relocations * 1.25 is greater than
	 * hashtable_size.
	 */
	int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));

	hashtable_bits += should_double_size;

	hashtable_size <<= should_double_size;

	*relocation_hashtable = kmalloc_array(hashtable_size,
					      sizeof(**relocation_hashtable),
					      GFP_KERNEL);
	if (!*relocation_hashtable)
		return 0;

	__hash_init(*relocation_hashtable, hashtable_size);

	return hashtable_bits;
}
777 
/*
 * Apply all relocations in section @relsec of a module being loaded.
 *
 * Most types are patched in place via reloc_handlers[]; ADD/SUB/SET/ULEB128
 * types are accumulated per target location and written back at the end by
 * process_accumulated_relocations(). PCREL_LO12 relocations are resolved by
 * locating their paired HI20 entry first. Returns 0 or a negative errno.
 */
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
	int (*handler)(struct module *me, void *location, Elf_Addr v);
	Elf_Sym *sym;
	void *location;
	unsigned int i, type;
	unsigned int j_idx = 0;		/* cache of last HI20 search position */
	Elf_Addr v;
	int res;
	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
	struct hlist_head *relocation_hashtable;
	struct list_head used_buckets_list;
	unsigned int hashtable_bits;

	hashtable_bits = initialize_relocation_hashtable(num_relocations,
							 &relocation_hashtable);

	if (!relocation_hashtable)
		return -ENOMEM;

	INIT_LIST_HEAD(&used_buckets_list);

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	for (i = 0; i < num_relocations; i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_RISCV_R_SYM(rel[i].r_info);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			pr_warn("%s: Unknown symbol %s\n",
				me->name, strtab + sym->st_name);
			/*
			 * NOTE(review): early returns in this loop leak the
			 * hashtable and any already-accumulated entries —
			 * confirm whether a cleanup path is needed.
			 */
			return -ENOENT;
		}

		type = ELF_RISCV_R_TYPE(rel[i].r_info);

		/* Holes in reloc_handlers[] have a NULL reloc_handler. */
		if (type < ARRAY_SIZE(reloc_handlers))
			handler = reloc_handlers[type].reloc_handler;
		else
			handler = NULL;

		if (!handler) {
			pr_err("%s: Unknown relocation type %u\n",
			       me->name, type);
			return -EINVAL;
		}

		v = sym->st_value + rel[i].r_addend;

		if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
			/*
			 * Circular scan from the previous hit: HI20/LO12
			 * pairs tend to appear close together, so resuming
			 * at j_idx is usually a near-immediate match.
			 */
			unsigned int j = j_idx;
			bool found = false;

			do {
				unsigned long hi20_loc =
					sechdrs[sechdrs[relsec].sh_info].sh_addr
					+ rel[j].r_offset;
				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);

				/* Find the corresponding HI20 relocation entry */
				if (hi20_loc == sym->st_value
				    && (hi20_type == R_RISCV_PCREL_HI20
					|| hi20_type == R_RISCV_GOT_HI20)) {
					s32 hi20, lo12;
					Elf_Sym *hi20_sym =
						(Elf_Sym *)sechdrs[symindex].sh_addr
						+ ELF_RISCV_R_SYM(rel[j].r_info);
					unsigned long hi20_sym_val =
						hi20_sym->st_value
						+ rel[j].r_addend;

					/* Calculate lo12 */
					size_t offset = hi20_sym_val - hi20_loc;
					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
					    && hi20_type == R_RISCV_GOT_HI20) {
						offset = module_emit_got_entry(
							 me, hi20_sym_val);
						offset = offset - hi20_loc;
					}
					hi20 = (offset + 0x800) & 0xfffff000;
					lo12 = offset - hi20;
					v = lo12;
					found = true;

					break;
				}

				j++;
				/*
				 * NOTE(review): the wrap test uses '>', so j
				 * can briefly equal num_relocations and the
				 * next iteration reads rel[num_relocations]
				 * — possible one-past-the-end access; confirm
				 * whether this should be '>='.
				 */
				if (j > sechdrs[relsec].sh_size / sizeof(*rel))
					j = 0;

			} while (j_idx != j);

			if (!found) {
				pr_err(
				  "%s: Can not find HI20 relocation information\n",
				  me->name);
				return -EINVAL;
			}

			/* Record the previous j-loop end index */
			j_idx = j;
		}

		if (reloc_handlers[type].accumulate_handler)
			res = add_relocation_to_accumulate(me, type, location,
							   hashtable_bits, v,
							   relocation_hashtable,
							   &used_buckets_list);
		else
			res = handler(me, location, v);
		if (res)
			return res;
	}

	/* Flush the accumulated relocations and free all bookkeeping. */
	process_accumulated_relocations(me, &relocation_hashtable,
					&used_buckets_list);

	return 0;
}
908 
#ifdef CONFIG_MMU
static struct execmem_info execmem_info __ro_after_init;

/* Describe the address ranges used for module, kprobe and BPF allocations. */
struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= MODULES_VADDR,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
			[EXECMEM_KPROBES] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL_READ_EXEC,
				.alignment = 1,
			},
			[EXECMEM_BPF] = {
				.start	= BPF_JIT_REGION_START,
				.end	= BPF_JIT_REGION_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = PAGE_SIZE,
			},
		},
	};

	return &execmem_info;
}
#endif
940 
941 int module_finalize(const Elf_Ehdr *hdr,
942 		    const Elf_Shdr *sechdrs,
943 		    struct module *me)
944 {
945 	const Elf_Shdr *s;
946 
947 	s = find_section(hdr, sechdrs, ".alternative");
948 	if (s)
949 		apply_module_alternatives((void *)s->sh_addr, s->sh_size);
950 
951 	return 0;
952 }
953