xref: /linux/arch/riscv/kernel/module.c (revision dbcedec3a31119d7594baacc743300d127c99c56)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Copyright (C) 2017 Zihao Yu
 */

#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/sections.h>

struct used_bucket {
	struct list_head head;
	struct hlist_head *bucket;
};

struct relocation_head {
	struct hlist_node node;
	struct list_head *rel_entry;
	void *location;
};

struct relocation_entry {
	struct list_head head;
	Elf_Addr value;
	unsigned int type;
};

struct relocation_handlers {
	int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
	int (*accumulate_handler)(struct module *me, void *location,
				  long buffer);
};

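/*
 * Each relocation type provides a reloc_handler that patches the instruction
 * or data at 'location'.  Types that take part in the ADD/SUB/SET/ULEB128
 * accumulation scheme also provide an accumulate_handler; for those, the
 * reloc_handler is run against a temporary buffer and the accumulate_handler
 * commits the final value (see process_accumulated_relocations()).
 */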
/*
 * The auipc+jalr instruction pair can reach any PC-relative offset
 * in the range [-2^31 - 2^11, 2^31 - 2^11)
 */
static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
{
#ifdef CONFIG_32BIT
	return true;
#else
	return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
#endif
}

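/*
 * Instructions are stored as little-endian 16-bit parcels, so a 32-bit
 * instruction may only be 16-bit aligned.  These helpers read-modify-write an
 * instruction: the bits covered by 'keep' are preserved and 'set' is OR-ed in.
 */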
static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{
	__le16 *parcel = location;
	u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;

	insn &= keep;
	insn |= set;

	parcel[0] = cpu_to_le16(insn);
	parcel[1] = cpu_to_le16(insn >> 16);
	return 0;
}

static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{
	__le16 *parcel = location;
	u16 insn = le16_to_cpu(*parcel);

	insn &= keep;
	insn |= set;

	*parcel = cpu_to_le16(insn);
	return 0;
}

static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
{
	if (v != (u32)v) {
		pr_err("%s: value %016llx out of range for 32-bit field\n",
		       me->name, (long long)v);
		return -EINVAL;
	}
	*(u32 *)location = v;
	return 0;
}

static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u64 *)location = v;
	return 0;
}

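/*
 * B-type immediate layout: imm[12] -> bit 31, imm[10:5] -> bits 30:25,
 * imm[4:1] -> bits 11:8, imm[11] -> bit 7.  The keep mask 0x1fff07f preserves
 * the opcode, funct3, rs1 and rs2 fields.
 */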
static int apply_r_riscv_branch_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm12 = (offset & 0x1000) << (31 - 12);
	u32 imm11 = (offset & 0x800) >> (11 - 7);
	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
	u32 imm4_1 = (offset & 0x1e) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}

static int apply_r_riscv_jal_rela(struct module *me, void *location,
				  Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm20 = (offset & 0x100000) << (31 - 20);
	u32 imm19_12 = (offset & 0xff000);
	u32 imm11 = (offset & 0x800) << (20 - 11);
	u32 imm10_1 = (offset & 0x7fe) << (30 - 10);

	return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}

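/*
 * CB-format (c.beqz/c.bnez) immediate layout: offset[8|4:3] -> bits 12:10,
 * offset[7:6|2:1|5] -> bits 6:2.  The keep mask 0xe383 preserves funct3,
 * rs1' and the opcode.
 */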
static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm8 = (offset & 0x100) << (12 - 8);
	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4_3 = (offset & 0x18) << (12 - 5);
	u16 imm2_1 = (offset & 0x6) << (12 - 10);

	return riscv_insn_rvc_rmw(location, 0xe383,
			imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}

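/*
 * CJ-format (c.j) immediate layout: offset[11|4|9:8|10|6|7|3:1|5] -> bits
 * 12:2.  The keep mask 0xe003 preserves funct3 and the opcode.
 */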
static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm11 = (offset & 0x800) << (12 - 11);
	u16 imm10 = (offset & 0x400) >> (10 - 8);
	u16 imm9_8 = (offset & 0x300) << (12 - 11);
	u16 imm7 = (offset & 0x80) >> (7 - 6);
	u16 imm6 = (offset & 0x40) << (12 - 11);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4 = (offset & 0x10) << (12 - 5);
	u16 imm3_1 = (offset & 0xe) << (12 - 10);

	return riscv_insn_rvc_rmw(location, 0xe003,
			imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}

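/*
 * PC-relative addressing uses an auipc (PCREL_HI20) plus a paired PCREL_LO12
 * relocation.  Adding 0x800 before masking rounds the upper 20 bits so that
 * they compensate for the sign extension of the low 12 bits when the pair is
 * added back together.
 */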
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}

static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
					   Elf_Addr v)
{
	/*
	 * v is the lo12 value to fill. It is calculated before calling this
	 * handler.
	 */
	return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
}

static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
					   Elf_Addr v)
{
	/*
	 * v is the lo12 value to fill. It is calculated before calling this
	 * handler.
	 */
	u32 imm11_5 = (v & 0xfe0) << (31 - 11);
	u32 imm4_0 = (v & 0x1f) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}

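/*
 * HI20/LO12 implement absolute addressing with a lui/addi (or load/store)
 * pair; the upper 20 bits are rounded with +0x800 exactly like the
 * PC-relative case above.
 */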
static int apply_r_riscv_hi20_rela(struct module *me, void *location,
				   Elf_Addr v)
{
	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
}

static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	/* Skip the medlow check; the corresponding HI20 relocation was already checked */
	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
	s32 lo12 = ((s32)v - hi20);

	return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
}

static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	/* Skip the medlow check; the corresponding HI20 relocation was already checked */
	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
	s32 lo12 = ((s32)v - hi20);
	u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
	u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}

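/*
 * GOT_HI20 loads the symbol address indirectly: module_emit_got_entry()
 * returns the address of a GOT slot holding the symbol value, and the auipc
 * immediate is patched with the PC-relative offset of that slot rather than
 * of the symbol itself.
 */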
static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	/* Always emit the GOT entry */
	if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
		offset = (void *)module_emit_got_entry(me, v) - location;
	} else {
		pr_err(
		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}

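/*
 * CALL/CALL_PLT patch an auipc+jalr pair.  CALL_PLT falls back to a PLT entry
 * emitted by module_emit_plt_entry() when the target is outside the +-2GB
 * range; plain CALL fails instead.
 */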
static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 hi20, lo12;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		/* Only emit a PLT entry if the offset is outside the 32-bit range */
		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
			offset = (void *)module_emit_plt_entry(me, v) - location;
		} else {
			pr_err(
			  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
			  me->name, (long long)v, location);
			return -EINVAL;
		}
	}

	hi20 = (offset + 0x800) & 0xfffff000;
	lo12 = (offset - hi20) & 0xfff;
	riscv_insn_rmw(location, 0xfff, hi20);
	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}

static int apply_r_riscv_call_rela(struct module *me, void *location,
				   Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 hi20, lo12;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	hi20 = (offset + 0x800) & 0xfffff000;
	lo12 = (offset - hi20) & 0xfff;
	riscv_insn_rmw(location, 0xfff, hi20);
	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}

static int apply_r_riscv_relax_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	return 0;
}

static int apply_r_riscv_align_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	pr_err(
	  "%s: Unexpected relocation type 'R_RISCV_ALIGN' at PC = %p\n",
	  me->name, location);
	return -EINVAL;
}

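/*
 * The ADD/SUB/SET/ULEB128 relocations below are accumulated: in
 * apply_relocate_add() their reloc_handler operates on a temporary buffer,
 * and the final value is written out by the matching accumulate_handler in
 * process_accumulated_relocations().
 */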
static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location += (u8)v;
	return 0;
}

static int apply_r_riscv_add16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location += (u16)v;
	return 0;
}

static int apply_r_riscv_add32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location += (u32)v;
	return 0;
}

static int apply_r_riscv_add64_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u64 *)location += (u64)v;
	return 0;
}

static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location -= (u8)v;
	return 0;
}

static int apply_r_riscv_sub16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location -= (u16)v;
	return 0;
}

static int apply_r_riscv_sub32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location -= (u32)v;
	return 0;
}

static int apply_r_riscv_sub64_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u64 *)location -= (u64)v;
	return 0;
}

static int dynamic_linking_not_supported(struct module *me, void *location,
					 Elf_Addr v)
{
	pr_err("%s: Dynamic linking not supported in kernel modules, PC = %p\n",
	       me->name, location);
	return -EINVAL;
}

static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
{
	pr_err("%s: Thread local storage not supported in kernel modules, PC = %p\n",
	       me->name, location);
	return -EINVAL;
}

static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
{
	u8 *byte = location;
	u8 value = v;

	*byte = (*byte - (value & 0x3f)) & 0x3f;
	return 0;
}

static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
{
	u8 *byte = location;
	u8 value = v;

	*byte = (*byte & 0xc0) | (value & 0x3f);
	return 0;
}

static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location = (u8)v;
	return 0;
}

static int apply_r_riscv_set16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location = (u16)v;
	return 0;
}

static int apply_r_riscv_set32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location = (u32)v;
	return 0;
}

static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	*(u32 *)location = v - (uintptr_t)location;
	return 0;
}

static int apply_r_riscv_plt32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		/* Only emit a PLT entry if the offset is outside the 32-bit range */
		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
			offset = (void *)module_emit_plt_entry(me, v) - location;
		} else {
			pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
			       me->name, (long long)v, location);
			return -EINVAL;
		}
	}

	*(u32 *)location = (u32)offset;
	return 0;
}

static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
{
	*(long *)location = v;
	return 0;
}

static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
{
	*(long *)location -= v;
	return 0;
}

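/*
 * The accumulation handlers commit the accumulated buffer to the real
 * location once every relocation targeting it has been applied, rejecting
 * values that overflow fields narrower than the buffer.
 */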
static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
{
	u8 *byte = location;
	u8 value = buffer;

	if (buffer > 0x3f) {
		pr_err("%s: value %ld out of range for 6-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}

	*byte = (*byte & 0xc0) | (value & 0x3f);
	return 0;
}

static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U8_MAX) {
		pr_err("%s: value %ld out of range for 8-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u8 *)location = (u8)buffer;
	return 0;
}

static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U16_MAX) {
		pr_err("%s: value %ld out of range for 16-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u16 *)location = (u16)buffer;
	return 0;
}

static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U32_MAX) {
		pr_err("%s: value %ld out of range for 32-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u32 *)location = (u32)buffer;
	return 0;
}

static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
{
	*(u64 *)location = (u64)buffer;
	return 0;
}

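/*
 * Example ULEB128 encoding: 0x12345 is emitted as the bytes 0xc5 0xc6 0x04
 * (seven bits per byte, least-significant group first, top bit set on all
 * but the last byte).
 */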
static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
{
	/*
	 * ULEB128 is a variable length encoding. Encode the buffer into
	 * the ULEB128 data format.
	 */
	u8 *p = location;

	while (buffer != 0) {
		u8 value = buffer & 0x7f;

		buffer >>= 7;
		value |= (!!buffer) << 7;

		*p++ = value;
	}
	return 0;
}

/*
 * Relocations defined in the riscv-elf-psabi-doc.
 * This handles static linking only.
 */
static const struct relocation_handlers reloc_handlers[] = {
	[R_RISCV_32]		= { .reloc_handler = apply_r_riscv_32_rela },
	[R_RISCV_64]		= { .reloc_handler = apply_r_riscv_64_rela },
	[R_RISCV_RELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_COPY]		= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_JUMP_SLOT]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	/* 12-15 undefined */
	[R_RISCV_BRANCH]	= { .reloc_handler = apply_r_riscv_branch_rela },
	[R_RISCV_JAL]		= { .reloc_handler = apply_r_riscv_jal_rela },
	[R_RISCV_CALL]		= { .reloc_handler = apply_r_riscv_call_rela },
	[R_RISCV_CALL_PLT]	= { .reloc_handler = apply_r_riscv_call_plt_rela },
	[R_RISCV_GOT_HI20]	= { .reloc_handler = apply_r_riscv_got_hi20_rela },
	[R_RISCV_TLS_GOT_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TLS_GD_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_PCREL_HI20]	= { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
	[R_RISCV_PCREL_LO12_I]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
	[R_RISCV_PCREL_LO12_S]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
	[R_RISCV_HI20]		= { .reloc_handler = apply_r_riscv_hi20_rela },
	[R_RISCV_LO12_I]	= { .reloc_handler = apply_r_riscv_lo12_i_rela },
	[R_RISCV_LO12_S]	= { .reloc_handler = apply_r_riscv_lo12_s_rela },
	[R_RISCV_TPREL_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_I]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_S]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_ADD]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_ADD8]		= { .reloc_handler = apply_r_riscv_add8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_ADD16]		= { .reloc_handler = apply_r_riscv_add16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_ADD32]		= { .reloc_handler = apply_r_riscv_add32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_ADD64]		= { .reloc_handler = apply_r_riscv_add64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	[R_RISCV_SUB8]		= { .reloc_handler = apply_r_riscv_sub8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SUB16]		= { .reloc_handler = apply_r_riscv_sub16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SUB32]		= { .reloc_handler = apply_r_riscv_sub32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_SUB64]		= { .reloc_handler = apply_r_riscv_sub64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	/* 41-42 reserved for future standard use */
	[R_RISCV_ALIGN]		= { .reloc_handler = apply_r_riscv_align_rela },
	[R_RISCV_RVC_BRANCH]	= { .reloc_handler = apply_r_riscv_rvc_branch_rela },
	[R_RISCV_RVC_JUMP]	= { .reloc_handler = apply_r_riscv_rvc_jump_rela },
	/* 46-50 reserved for future standard use */
	[R_RISCV_RELAX]		= { .reloc_handler = apply_r_riscv_relax_rela },
	[R_RISCV_SUB6]		= { .reloc_handler = apply_r_riscv_sub6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET6]		= { .reloc_handler = apply_r_riscv_set6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET8]		= { .reloc_handler = apply_r_riscv_set8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SET16]		= { .reloc_handler = apply_r_riscv_set16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SET32]		= { .reloc_handler = apply_r_riscv_set32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_32_PCREL]	= { .reloc_handler = apply_r_riscv_32_pcrel_rela },
	[R_RISCV_IRELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_PLT32]		= { .reloc_handler = apply_r_riscv_plt32_rela },
	[R_RISCV_SET_ULEB128]	= { .reloc_handler = apply_r_riscv_set_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	[R_RISCV_SUB_ULEB128]	= { .reloc_handler = apply_r_riscv_sub_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	/* 62-191 reserved for future standard use */
	/* 192-255 nonstandard ABI extensions */
};

static void
process_accumulated_relocations(struct module *me,
				struct hlist_head **relocation_hashtable,
				struct list_head *used_buckets_list)
{
	/*
	 * Only ADD/SUB/SET/ULEB128 should end up here.
	 *
	 * Each bucket may have more than one relocation location. All
	 * relocations for a location are stored in a list in a bucket.
	 *
	 * Relocations are applied to a temp variable before being stored to the
	 * provided location to check for overflow. This also allows ULEB128 to
	 * properly decide how many entries are needed before storing to
	 * location. The final value is stored into location using the handler
	 * for the last relocation to an address.
	 *
	 * Three layers of indexing:
	 *	- Each of the buckets in use
	 *	- Groups of relocations in each bucket by location address
	 *	- Each relocation entry for a location address
	 */
	struct used_bucket *bucket_iter;
	struct used_bucket *bucket_iter_tmp;
	struct relocation_head *rel_head_iter;
	struct hlist_node *rel_head_iter_tmp;
	struct relocation_entry *rel_entry_iter;
	struct relocation_entry *rel_entry_iter_tmp;
	int curr_type;
	void *location;
	long buffer;

	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
				 used_buckets_list, head) {
		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
					  bucket_iter->bucket, node) {
			buffer = 0;
			location = rel_head_iter->location;
			list_for_each_entry_safe(rel_entry_iter,
						 rel_entry_iter_tmp,
						 rel_head_iter->rel_entry,
						 head) {
				curr_type = rel_entry_iter->type;
				reloc_handlers[curr_type].reloc_handler(
					me, &buffer, rel_entry_iter->value);
				kfree(rel_entry_iter);
			}
			reloc_handlers[curr_type].accumulate_handler(
				me, location, buffer);
			kfree(rel_head_iter);
		}
		kfree(bucket_iter);
	}

	kfree(*relocation_hashtable);
}

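/*
 * Queue one relocation against 'location' so it can later be merged with the
 * other relocations that target the same address.  Relocation heads are kept
 * in a hashtable keyed by location; each non-empty bucket is also tracked on
 * used_buckets_list for the final walk.
 */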
static int add_relocation_to_accumulate(struct module *me, int type,
					void *location,
					unsigned int hashtable_bits, Elf_Addr v,
					struct hlist_head *relocation_hashtable,
					struct list_head *used_buckets_list)
{
	struct relocation_entry *entry;
	struct relocation_head *rel_head;
	struct hlist_head *current_head;
	struct used_bucket *bucket;
	unsigned long hash;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->head);
	entry->type = type;
	entry->value = v;

	hash = hash_min((uintptr_t)location, hashtable_bits);

	current_head = &relocation_hashtable[hash];

	/*
	 * Search for the relocation_head that collects the relocations applied
	 * at the provided location.
	 */
	bool found = false;
	struct relocation_head *rel_head_iter;

	hlist_for_each_entry(rel_head_iter, current_head, node) {
		if (rel_head_iter->location == location) {
			found = true;
			rel_head = rel_head_iter;
			break;
		}
	}

	/*
	 * If there have not yet been any relocations at the provided location,
	 * create a relocation_head for that location and populate it with this
	 * relocation_entry.
	 */
	if (!found) {
		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);

		if (!rel_head) {
			kfree(entry);
			return -ENOMEM;
		}

		rel_head->rel_entry =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);

		if (!rel_head->rel_entry) {
			kfree(entry);
			kfree(rel_head);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(rel_head->rel_entry);
		rel_head->location = location;
		INIT_HLIST_NODE(&rel_head->node);
		if (!current_head->first) {
			bucket =
				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);

			if (!bucket) {
				kfree(entry);
				kfree(rel_head->rel_entry);
				kfree(rel_head);
				return -ENOMEM;
			}

			INIT_LIST_HEAD(&bucket->head);
			bucket->bucket = current_head;
			list_add(&bucket->head, used_buckets_list);
		}
		hlist_add_head(&rel_head->node, current_head);
	}

	/* Add the relocation to the list of the discovered rel_head */
	list_add_tail(&entry->head, rel_head->rel_entry);

	return 0;
}

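/*
 * Sizing example: 100 relocations round up to a 128-entry table (7 bits);
 * with 120 relocations, 120 + 120/4 = 150 exceeds 128, so the table is
 * doubled to 256 entries (8 bits).
 */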
static unsigned int
initialize_relocation_hashtable(unsigned int num_relocations,
				struct hlist_head **relocation_hashtable)
{
	/* Can safely assume that the bit count does not exceed the width of a long */
	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
	/*
	 * When hashtable_size == 1, hashtable_bits == 0.
	 * This is valid because the hashing algorithm returns 0 in this case.
	 */
	unsigned int hashtable_bits = ilog2(hashtable_size);

	/*
	 * Double the size of the hashtable if num_relocations * 1.25 is
	 * greater than hashtable_size.
	 */
	int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));

	hashtable_bits += should_double_size;

	hashtable_size <<= should_double_size;

	*relocation_hashtable = kmalloc_array(hashtable_size,
					      sizeof(**relocation_hashtable),
					      GFP_KERNEL);
	if (!*relocation_hashtable)
		return 0;

	__hash_init(*relocation_hashtable, hashtable_size);

	return hashtable_bits;
}

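/*
 * Main relocation pass.  PCREL_LO12 relocations are resolved by locating
 * their matching HI20 entry first; relocation types with an
 * accumulate_handler are queued in the hashtable and committed at the end,
 * everything else is applied in place immediately.
 */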
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
	int (*handler)(struct module *me, void *location, Elf_Addr v);
	Elf_Sym *sym;
	void *location;
	unsigned int i, type;
	unsigned int j_idx = 0;
	Elf_Addr v;
	int res;
	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
	struct hlist_head *relocation_hashtable;
	struct list_head used_buckets_list;
	unsigned int hashtable_bits;

	hashtable_bits = initialize_relocation_hashtable(num_relocations,
							 &relocation_hashtable);

	if (!relocation_hashtable)
		return -ENOMEM;

	INIT_LIST_HEAD(&used_buckets_list);

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	for (i = 0; i < num_relocations; i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_RISCV_R_SYM(rel[i].r_info);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			pr_warn("%s: Unknown symbol %s\n",
				me->name, strtab + sym->st_name);
			return -ENOENT;
		}

		type = ELF_RISCV_R_TYPE(rel[i].r_info);

		if (type < ARRAY_SIZE(reloc_handlers))
			handler = reloc_handlers[type].reloc_handler;
		else
			handler = NULL;

		if (!handler) {
			pr_err("%s: Unknown relocation type %u\n",
			       me->name, type);
			return -EINVAL;
		}

		v = sym->st_value + rel[i].r_addend;

		if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
			unsigned int j = j_idx;
			bool found = false;

			do {
				unsigned long hi20_loc =
					sechdrs[sechdrs[relsec].sh_info].sh_addr
					+ rel[j].r_offset;
				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);

				/* Find the corresponding HI20 relocation entry */
				if (hi20_loc == sym->st_value
				    && (hi20_type == R_RISCV_PCREL_HI20
					|| hi20_type == R_RISCV_GOT_HI20)) {
					s32 hi20, lo12;
					Elf_Sym *hi20_sym =
						(Elf_Sym *)sechdrs[symindex].sh_addr
						+ ELF_RISCV_R_SYM(rel[j].r_info);
					unsigned long hi20_sym_val =
						hi20_sym->st_value
						+ rel[j].r_addend;

					/* Calculate lo12 */
					size_t offset = hi20_sym_val - hi20_loc;
					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
					    && hi20_type == R_RISCV_GOT_HI20) {
						offset = module_emit_got_entry(
							 me, hi20_sym_val);
						offset = offset - hi20_loc;
					}
					hi20 = (offset + 0x800) & 0xfffff000;
					lo12 = offset - hi20;
					v = lo12;
					found = true;

					break;
				}

				j++;
				if (j > sechdrs[relsec].sh_size / sizeof(*rel))
					j = 0;

			} while (j_idx != j);

			if (!found) {
				pr_err(
				  "%s: Can not find HI20 relocation information\n",
				  me->name);
				return -EINVAL;
			}

			/* Record the previous j-loop end index */
			j_idx = j;
		}

		if (reloc_handlers[type].accumulate_handler)
			res = add_relocation_to_accumulate(me, type, location,
							   hashtable_bits, v,
							   relocation_hashtable,
							   &used_buckets_list);
		else
			res = handler(me, location, v);
		if (res)
			return res;
	}

	process_accumulated_relocations(me, &relocation_hashtable,
					&used_buckets_list);

	return 0;
}

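/*
 * Module memory comes from the dedicated MODULES_VADDR..MODULES_END window,
 * which is placed so that the 32-bit PC-relative relocations above can reach
 * kernel symbols.
 */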
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR,
				    MODULES_END, GFP_KERNEL,
				    PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
				    NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;

	s = find_section(hdr, sechdrs, ".alternative");
	if (s)
		apply_module_alternatives((void *)s->sh_addr, s->sh_size);

	return 0;
}
931