// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Copyright (C) 2017 Zihao Yu
 */

#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/sections.h>

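/*
 * Accumulation bookkeeping: every location touched by ADD/SUB/SET/ULEB128
 * style relocations gets a relocation_head (hashed by location) whose list
 * collects one relocation_entry per relocation. used_bucket tracks which
 * hash buckets are populated so they can be walked and freed later.
 */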
struct used_bucket {
	struct list_head head;
	struct hlist_head *bucket;
};

struct relocation_head {
	struct hlist_node node;
	struct list_head *rel_entry;
	void *location;
};

struct relocation_entry {
	struct list_head head;
	Elf_Addr value;
	unsigned int type;
};

struct relocation_handlers {
	int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
	int (*accumulate_handler)(struct module *me, void *location,
				  long buffer);
};

unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
void process_accumulated_relocations(struct module *me);
int add_relocation_to_accumulate(struct module *me, int type, void *location,
				 unsigned int hashtable_bits, Elf_Addr v);

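/*
 * Shared state for one apply_relocate_add() invocation: the hashtable is
 * allocated by initialize_relocation_hashtable() and freed together with
 * the used bucket list by process_accumulated_relocations().
 */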
struct hlist_head *relocation_hashtable;

struct list_head used_buckets_list;

/*
 * The auipc+jalr instruction pair can reach any PC-relative offset
 * in the range [-2^31 - 2^11, 2^31 - 2^11)
 */
static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
{
#ifdef CONFIG_32BIT
	return true;
#else
	return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
#endif
}

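/*
 * RISC-V instructions are stored as a sequence of 16-bit little-endian
 * parcels. The helpers below read an instruction parcel-wise, clear the
 * bits not covered by @keep, OR in the relocated immediate bits from
 * @set, then write the parcels back.
 */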
static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{
	u16 *parcel = location;
	u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;

	insn &= keep;
	insn |= set;

	parcel[0] = cpu_to_le16(insn);
	parcel[1] = cpu_to_le16(insn >> 16);
	return 0;
}

static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{
	u16 *parcel = location;
	u16 insn = le16_to_cpu(*parcel);

	insn &= keep;
	insn |= set;

	*parcel = cpu_to_le16(insn);
	return 0;
}

static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
{
	if (v != (u32)v) {
		pr_err("%s: value %016llx out of range for 32-bit field\n",
		       me->name, (long long)v);
		return -EINVAL;
	}
	*(u32 *)location = v;
	return 0;
}

static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u64 *)location = v;
	return 0;
}

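/*
 * R_RISCV_BRANCH patches a B-type instruction, whose 13-bit immediate is
 * scattered as imm[12|10:5] in bits 31:25 and imm[4:1|11] in bits 11:7.
 * The keep mask 0x1fff07f preserves opcode, funct3, rs1 and rs2.
 */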
static int apply_r_riscv_branch_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm12 = (offset & 0x1000) << (31 - 12);
	u32 imm11 = (offset & 0x800) >> (11 - 7);
	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
	u32 imm4_1 = (offset & 0x1e) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}

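/*
 * R_RISCV_JAL patches a J-type instruction; the 21-bit immediate is encoded
 * as imm[20|10:1|11|19:12] in bits 31:12. The keep mask 0xfff preserves rd
 * and the opcode.
 */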
static int apply_r_riscv_jal_rela(struct module *me, void *location,
				  Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm20 = (offset & 0x100000) << (31 - 20);
	u32 imm19_12 = (offset & 0xff000);
	u32 imm11 = (offset & 0x800) << (20 - 11);
	u32 imm10_1 = (offset & 0x7fe) << (30 - 10);

	return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}

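/*
 * The two handlers below patch compressed instructions: CB-format branches
 * (c.beqz/c.bnez) and CJ-format jumps (c.j/c.jal). Their keep masks retain
 * funct3, the register field (CB only) and the opcode while the scattered
 * immediate bits are replaced.
 */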
static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm8 = (offset & 0x100) << (12 - 8);
	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4_3 = (offset & 0x18) << (12 - 5);
	u16 imm2_1 = (offset & 0x6) << (12 - 10);

	return riscv_insn_rvc_rmw(location, 0xe383,
			imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}

static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm11 = (offset & 0x800) << (12 - 11);
	u16 imm10 = (offset & 0x400) >> (10 - 8);
	u16 imm9_8 = (offset & 0x300) << (12 - 11);
	u16 imm7 = (offset & 0x80) >> (7 - 6);
	u16 imm6 = (offset & 0x40) << (12 - 11);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
	u16 imm4 = (offset & 0x10) << (12 - 5);
	u16 imm3_1 = (offset & 0xe) << (12 - 10);

	return riscv_insn_rvc_rmw(location, 0xe003,
			imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}

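/*
 * PCREL_HI20 fills the upper 20 bits of an auipc. Adding 0x800 before
 * masking rounds to the nearest 4 KiB page so that the sign-extended lo12
 * of the paired instruction lands on the target, e.g. for offset 0x1fff:
 * hi20 = (0x1fff + 0x800) & 0xfffff000 = 0x2000, lo12 = 0x1fff - 0x2000 = -1.
 */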
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}

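/*
 * For the two PCREL_LO12 handlers, apply_relocate_add() has already located
 * the matching PCREL_HI20/GOT_HI20 relocation and computed the lo12 part of
 * the offset, which is what arrives here in v.
 */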
static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
					   Elf_Addr v)
{
	/*
	 * v is the lo12 value to fill. It is calculated before calling this
	 * handler.
	 */
	return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
}

static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
					   Elf_Addr v)
{
	/*
	 * v is the lo12 value to fill. It is calculated before calling this
	 * handler.
	 */
	u32 imm11_5 = (v & 0xfe0) << (31 - 11);
	u32 imm4_0 = (v & 0x1f) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}

static int apply_r_riscv_hi20_rela(struct module *me, void *location,
				   Elf_Addr v)
{
	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
}

static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	/* No medlow check needed; the HI20 handler has already done it */
	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
	s32 lo12 = ((s32)v - hi20);

	return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
}

static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	/* No medlow check needed; the HI20 handler has already done it */
	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
	s32 lo12 = ((s32)v - hi20);
	u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
	u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);

	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}

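/*
 * GOT_HI20 always goes through a GOT entry emitted by module_emit_got_entry()
 * when CONFIG_MODULE_SECTIONS is enabled; the auipc is then patched with the
 * hi20 part of the offset to that entry. Without CONFIG_MODULE_SECTIONS the
 * relocation is rejected.
 */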
static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	/* Always emit the got entry */
	if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
		offset = (void *)module_emit_got_entry(me, v) - location;
	} else {
		pr_err(
		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}

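/*
 * CALL and CALL_PLT patch an auipc+jalr pair: the auipc gets the hi20 part
 * of the offset and the following jalr the lo12 part. CALL_PLT additionally
 * falls back to a PLT entry (module_emit_plt_entry()) when the target is
 * outside the 32-bit PC-relative range and CONFIG_MODULE_SECTIONS is set.
 */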
static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 hi20, lo12;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		/* Only emit the plt entry if offset over 32-bit range */
		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
			offset = (void *)module_emit_plt_entry(me, v) - location;
		} else {
			pr_err(
			  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
			  me->name, (long long)v, location);
			return -EINVAL;
		}
	}

	hi20 = (offset + 0x800) & 0xfffff000;
	lo12 = (offset - hi20) & 0xfff;
	riscv_insn_rmw(location, 0xfff, hi20);
	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}

static int apply_r_riscv_call_rela(struct module *me, void *location,
				   Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 hi20, lo12;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
		  me->name, (long long)v, location);
		return -EINVAL;
	}

	hi20 = (offset + 0x800) & 0xfffff000;
	lo12 = (offset - hi20) & 0xfff;
	riscv_insn_rmw(location, 0xfff, hi20);
	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}

static int apply_r_riscv_relax_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	return 0;
}

static int apply_r_riscv_align_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	pr_err(
	  "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
	  me->name, location);
	return -EINVAL;
}

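/*
 * The ADD/SUB/SET (and ULEB128) relocations operate on data rather than on
 * instructions. apply_relocate_add() never applies them to the final
 * location directly: they are routed through the accumulation machinery,
 * so "location" here points at a scratch buffer and the result is written
 * out later by the matching accumulate handler.
 */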
static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location += (u8)v;
	return 0;
}

static int apply_r_riscv_add16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location += (u16)v;
	return 0;
}

static int apply_r_riscv_add32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location += (u32)v;
	return 0;
}

static int apply_r_riscv_add64_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u64 *)location += (u64)v;
	return 0;
}

static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location -= (u8)v;
	return 0;
}

static int apply_r_riscv_sub16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location -= (u16)v;
	return 0;
}

static int apply_r_riscv_sub32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location -= (u32)v;
	return 0;
}

static int apply_r_riscv_sub64_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u64 *)location -= (u64)v;
	return 0;
}

static int dynamic_linking_not_supported(struct module *me, void *location,
					 Elf_Addr v)
{
	pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
	       me->name, location);
	return -EINVAL;
}

static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
{
	pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
	       me->name, location);
	return -EINVAL;
}

static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
{
	u8 *byte = location;
	u8 value = v;

	*byte = (*byte - (value & 0x3f)) & 0x3f;
	return 0;
}

static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
{
	u8 *byte = location;
	u8 value = v;

	*byte = (*byte & 0xc0) | (value & 0x3f);
	return 0;
}

static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
{
	*(u8 *)location = (u8)v;
	return 0;
}

static int apply_r_riscv_set16_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u16 *)location = (u16)v;
	return 0;
}

static int apply_r_riscv_set32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	*(u32 *)location = (u32)v;
	return 0;
}

static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	*(u32 *)location = v - (uintptr_t)location;
	return 0;
}

static int apply_r_riscv_plt32_rela(struct module *me, void *location,
				    Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;

	if (!riscv_insn_valid_32bit_offset(offset)) {
		/* Only emit the plt entry if offset over 32-bit range */
		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
			offset = (void *)module_emit_plt_entry(me, v) - location;
		} else {
			pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
			       me->name, (long long)v, location);
			return -EINVAL;
		}
	}

	*(u32 *)location = (u32)offset;
	return 0;
}

static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
{
	*(long *)location = v;
	return 0;
}

static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
{
	*(long *)location -= v;
	return 0;
}

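/*
 * Accumulate handlers commit the value collected in the scratch buffer to
 * the real location once all relocations for that location have been
 * applied, checking that the result still fits the target field.
 */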
static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
{
	u8 *byte = location;
	u8 value = buffer;

	if (buffer > 0x3f) {
		pr_err("%s: value %ld out of range for 6-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}

	*byte = (*byte & 0xc0) | (value & 0x3f);
	return 0;
}

static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U8_MAX) {
		pr_err("%s: value %ld out of range for 8-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u8 *)location = (u8)buffer;
	return 0;
}

static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U16_MAX) {
		pr_err("%s: value %ld out of range for 16-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u16 *)location = (u16)buffer;
	return 0;
}

static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
{
	if (buffer > U32_MAX) {
		pr_err("%s: value %ld out of range for 32-bit relocation.\n",
		       me->name, buffer);
		return -EINVAL;
	}
	*(u32 *)location = (u32)buffer;
	return 0;
}

static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
{
	*(u64 *)location = (u64)buffer;
	return 0;
}

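/*
 * ULEB128 stores 7 value bits per byte, least significant group first, with
 * the top bit of each byte flagging a continuation. For example, 624485
 * (0x98765) encodes as 0xe5 0x8e 0x26.
 */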
static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
{
	/*
	 * ULEB128 is a variable length encoding. Encode the buffer into
	 * the ULEB128 data format.
	 */
	u8 *p = location;

	while (buffer != 0) {
		u8 value = buffer & 0x7f;

		buffer >>= 7;
		value |= (!!buffer) << 7;

		*p++ = value;
	}
	return 0;
}

/*
 * Relocations defined in the riscv-elf-psabi-doc.
 * This handles static linking only.
 */
static const struct relocation_handlers reloc_handlers[] = {
	[R_RISCV_32]		= { .reloc_handler = apply_r_riscv_32_rela },
	[R_RISCV_64]		= { .reloc_handler = apply_r_riscv_64_rela },
	[R_RISCV_RELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_COPY]		= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_JUMP_SLOT]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	/* 12-15 undefined */
	[R_RISCV_BRANCH]	= { .reloc_handler = apply_r_riscv_branch_rela },
	[R_RISCV_JAL]		= { .reloc_handler = apply_r_riscv_jal_rela },
	[R_RISCV_CALL]		= { .reloc_handler = apply_r_riscv_call_rela },
	[R_RISCV_CALL_PLT]	= { .reloc_handler = apply_r_riscv_call_plt_rela },
	[R_RISCV_GOT_HI20]	= { .reloc_handler = apply_r_riscv_got_hi20_rela },
	[R_RISCV_TLS_GOT_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TLS_GD_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_PCREL_HI20]	= { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
	[R_RISCV_PCREL_LO12_I]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
	[R_RISCV_PCREL_LO12_S]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
	[R_RISCV_HI20]		= { .reloc_handler = apply_r_riscv_hi20_rela },
	[R_RISCV_LO12_I]	= { .reloc_handler = apply_r_riscv_lo12_i_rela },
	[R_RISCV_LO12_S]	= { .reloc_handler = apply_r_riscv_lo12_s_rela },
	[R_RISCV_TPREL_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_I]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_S]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_ADD]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_ADD8]		= { .reloc_handler = apply_r_riscv_add8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_ADD16]		= { .reloc_handler = apply_r_riscv_add16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_ADD32]		= { .reloc_handler = apply_r_riscv_add32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_ADD64]		= { .reloc_handler = apply_r_riscv_add64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	[R_RISCV_SUB8]		= { .reloc_handler = apply_r_riscv_sub8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SUB16]		= { .reloc_handler = apply_r_riscv_sub16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SUB32]		= { .reloc_handler = apply_r_riscv_sub32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_SUB64]		= { .reloc_handler = apply_r_riscv_sub64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	/* 41-42 reserved for future standard use */
	[R_RISCV_ALIGN]		= { .reloc_handler = apply_r_riscv_align_rela },
	[R_RISCV_RVC_BRANCH]	= { .reloc_handler = apply_r_riscv_rvc_branch_rela },
	[R_RISCV_RVC_JUMP]	= { .reloc_handler = apply_r_riscv_rvc_jump_rela },
	/* 46-50 reserved for future standard use */
	[R_RISCV_RELAX]		= { .reloc_handler = apply_r_riscv_relax_rela },
	[R_RISCV_SUB6]		= { .reloc_handler = apply_r_riscv_sub6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET6]		= { .reloc_handler = apply_r_riscv_set6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET8]		= { .reloc_handler = apply_r_riscv_set8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SET16]		= { .reloc_handler = apply_r_riscv_set16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SET32]		= { .reloc_handler = apply_r_riscv_set32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_32_PCREL]	= { .reloc_handler = apply_r_riscv_32_pcrel_rela },
	[R_RISCV_IRELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_PLT32]		= { .reloc_handler = apply_r_riscv_plt32_rela },
	[R_RISCV_SET_ULEB128]	= { .reloc_handler = apply_r_riscv_set_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	[R_RISCV_SUB_ULEB128]	= { .reloc_handler = apply_r_riscv_sub_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	/* 62-191 reserved for future standard use */
	/* 192-255 nonstandard ABI extensions */
};

void process_accumulated_relocations(struct module *me)
{
	/*
	 * Only ADD/SUB/SET/ULEB128 should end up here.
	 *
	 * Each bucket may have more than one relocation location. All
	 * relocations for a location are stored in a list in a bucket.
	 *
	 * Relocations are applied to a temp variable before being stored to the
	 * provided location to check for overflow. This also allows ULEB128 to
	 * properly decide how many entries are needed before storing to
	 * location. The final value is stored into location using the handler
	 * for the last relocation to an address.
	 *
	 * Three layers of indexing:
	 *	- Each of the buckets in use
	 *	- Groups of relocations in each bucket by location address
	 *	- Each relocation entry for a location address
	 */
	struct used_bucket *bucket_iter;
	struct used_bucket *bucket_iter_tmp;
	struct relocation_head *rel_head_iter;
	struct hlist_node *rel_head_iter_tmp;
	struct relocation_entry *rel_entry_iter;
	struct relocation_entry *rel_entry_iter_tmp;
	int curr_type;
	void *location;
	long buffer;

	/*
	 * Every entry is freed as it is processed, so use the _safe iterator
	 * variants throughout.
	 */
	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
				 &used_buckets_list, head) {
		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
					  bucket_iter->bucket, node) {
			buffer = 0;
			location = rel_head_iter->location;
			list_for_each_entry_safe(rel_entry_iter,
						 rel_entry_iter_tmp,
						 rel_head_iter->rel_entry, head) {
				curr_type = rel_entry_iter->type;
				reloc_handlers[curr_type].reloc_handler(
					me, &buffer, rel_entry_iter->value);
				kfree(rel_entry_iter);
			}
			reloc_handlers[curr_type].accumulate_handler(
				me, location, buffer);
			kfree(rel_head_iter->rel_entry);
			kfree(rel_head_iter);
		}
		kfree(bucket_iter);
	}

	kfree(relocation_hashtable);
}

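/*
 * Queue a relocation for later processing: look up (or create) the
 * relocation_head for this location in the hashtable and append the new
 * entry to its list. Returns 0 on success or -ENOMEM on allocation failure.
 */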
int add_relocation_to_accumulate(struct module *me, int type, void *location,
				 unsigned int hashtable_bits, Elf_Addr v)
{
	struct relocation_entry *entry;
	struct relocation_head *rel_head;
	struct hlist_head *current_head;
	struct used_bucket *bucket;
	unsigned long hash;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->head);
	entry->type = type;
	entry->value = v;

	hash = hash_min((uintptr_t)location, hashtable_bits);

	current_head = &relocation_hashtable[hash];

	/* Find matching location (if any) */
	bool found = false;
	struct relocation_head *rel_head_iter;

	hlist_for_each_entry(rel_head_iter, current_head, node) {
		if (rel_head_iter->location == location) {
			found = true;
			rel_head = rel_head_iter;
			break;
		}
	}

	if (!found) {
		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
		if (!rel_head) {
			kfree(entry);
			return -ENOMEM;
		}

		rel_head->rel_entry =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!rel_head->rel_entry) {
			kfree(entry);
			kfree(rel_head);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(rel_head->rel_entry);
		rel_head->location = location;
		INIT_HLIST_NODE(&rel_head->node);
		if (!current_head->first) {
			bucket =
				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
			if (!bucket) {
				kfree(entry);
				kfree(rel_head->rel_entry);
				kfree(rel_head);
				return -ENOMEM;
			}

			INIT_LIST_HEAD(&bucket->head);
			bucket->bucket = current_head;
			list_add(&bucket->head, &used_buckets_list);
		}
		hlist_add_head(&rel_head->node, current_head);
	}

	/* Add relocation to the tail of the list for this location */
	list_add_tail(&entry->head, rel_head->rel_entry);

	return 0;
}

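/*
 * Size the hashtable to the next power of two above the relocation count,
 * doubling it when that would leave the load factor above ~0.8. Returns the
 * number of hash bits; on allocation failure relocation_hashtable is left
 * NULL for the caller to detect.
 */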
unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
{
	/* Can safely assume that bits is not greater than BITS_PER_LONG */
	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
	unsigned int hashtable_bits = ilog2(hashtable_size);

	/*
	 * Double size of hashtable if num_relocations * 1.25 is greater than
	 * hashtable_size.
	 */
	int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));

	hashtable_bits += should_double_size;

	hashtable_size <<= should_double_size;

	relocation_hashtable = kmalloc_array(hashtable_size,
					     sizeof(*relocation_hashtable),
					     GFP_KERNEL);
	if (!relocation_hashtable)
		return 0;

	__hash_init(relocation_hashtable, hashtable_size);

	INIT_LIST_HEAD(&used_buckets_list);

	return hashtable_bits;
}

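/*
 * Main entry point for module relocation: resolve each symbol, dispatch to
 * the handler for the relocation type, pair PCREL_LO12 relocations with
 * their HI20 counterpart, and defer the ADD/SUB/SET/ULEB128 family to the
 * accumulation pass that runs at the end.
 */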
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
	int (*handler)(struct module *me, void *location, Elf_Addr v);
	Elf_Sym *sym;
	void *location;
	unsigned int i, type;
	Elf_Addr v;
	int res;
	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
	unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);

	if (!relocation_hashtable)
		return -ENOMEM;

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	for (i = 0; i < num_relocations; i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_RISCV_R_SYM(rel[i].r_info);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			pr_warn("%s: Unknown symbol %s\n",
				me->name, strtab + sym->st_name);
			return -ENOENT;
		}

		type = ELF_RISCV_R_TYPE(rel[i].r_info);

		if (type < ARRAY_SIZE(reloc_handlers))
			handler = reloc_handlers[type].reloc_handler;
		else
			handler = NULL;

		if (!handler) {
			pr_err("%s: Unknown relocation type %u\n",
			       me->name, type);
			return -EINVAL;
		}

		v = sym->st_value + rel[i].r_addend;

		if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
			unsigned int j;

			for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
				unsigned long hi20_loc =
					sechdrs[sechdrs[relsec].sh_info].sh_addr
					+ rel[j].r_offset;
				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);

				/* Find the corresponding HI20 relocation entry */
				if (hi20_loc == sym->st_value
				    && (hi20_type == R_RISCV_PCREL_HI20
					|| hi20_type == R_RISCV_GOT_HI20)) {
					s32 hi20, lo12;
					Elf_Sym *hi20_sym =
						(Elf_Sym *)sechdrs[symindex].sh_addr
						+ ELF_RISCV_R_SYM(rel[j].r_info);
					unsigned long hi20_sym_val =
						hi20_sym->st_value
						+ rel[j].r_addend;

					/* Calculate lo12 */
					size_t offset = hi20_sym_val - hi20_loc;
					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
					    && hi20_type == R_RISCV_GOT_HI20) {
						offset = module_emit_got_entry(
							 me, hi20_sym_val);
						offset = offset - hi20_loc;
					}
					hi20 = (offset + 0x800) & 0xfffff000;
					lo12 = offset - hi20;
					v = lo12;

					break;
				}
			}
			if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
				pr_err(
				  "%s: Can not find HI20 relocation information\n",
				  me->name);
				return -EINVAL;
			}
		}

		if (reloc_handlers[type].accumulate_handler)
			res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
		else
			res = handler(me, location, v);
		if (res)
			return res;
	}

	process_accumulated_relocations(me);

	return 0;
}

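/*
 * Modules are placed in the dedicated MODULES_VADDR..MODULES_END window
 * rather than the generic vmalloc area, which is intended to keep module
 * code within 32-bit PC-relative reach of the kernel image.
 */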
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR,
				    MODULES_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;

	s = find_section(hdr, sechdrs, ".alternative");
	if (s)
		apply_module_alternatives((void *)s->sh_addr, s->sh_size);

	return 0;
}