xref: /linux/arch/riscv/kernel/module.c (revision 55d0969c451159cff86949b38c39171cab962069)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  *  Copyright (C) 2017 Zihao Yu
5  */
6 
7 #include <linux/elf.h>
8 #include <linux/err.h>
9 #include <linux/errno.h>
10 #include <linux/hashtable.h>
11 #include <linux/kernel.h>
12 #include <linux/log2.h>
13 #include <linux/moduleloader.h>
14 #include <linux/sizes.h>
15 #include <linux/pgtable.h>
16 #include <asm/alternative.h>
17 #include <asm/sections.h>
18 
/*
 * Links one in-use bucket of the relocation hash table into a list so
 * that teardown only has to walk buckets that actually hold entries.
 */
struct used_bucket {
	struct list_head head;		/* linkage in used_buckets_list */
	struct hlist_head *bucket;	/* the hash-table bucket in use */
};
23 
/*
 * Groups all accumulated relocations that target one location in the
 * module being loaded. Hangs off a hash-table bucket, keyed by @location.
 */
struct relocation_head {
	struct hlist_node node;		/* linkage in the hash bucket */
	struct list_head *rel_entry;	/* list of struct relocation_entry */
	void *location;			/* module address being relocated */
};
29 
/* One pending relocation (value and type) against a single location. */
struct relocation_entry {
	struct list_head head;		/* linkage in relocation_head->rel_entry */
	Elf_Addr value;			/* symbol value + addend to apply */
	unsigned int type;		/* R_RISCV_* relocation type */
};
35 
struct relocation_handlers {
	/* Apply relocation value @v at @location (or at a temp buffer). */
	int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
	/* Store the accumulated @buffer to the real @location, with range check. */
	int (*accumulate_handler)(struct module *me, void *location,
				  long buffer);
};
41 
/*
 * The auipc+jalr instruction pair can reach any PC-relative offset
 * in the range [-2^31 - 2^11, 2^31 - 2^11)
 */
static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
{
#ifdef CONFIG_32BIT
	/* On rv32 every possible offset is reachable. */
	return true;
#else
	const ptrdiff_t lo = -(1L << 31) - (1L << 11);
	const ptrdiff_t hi = (1L << 31) - (1L << 11);

	return val >= lo && val < hi;
#endif
}
54 
55 static int riscv_insn_rmw(void *location, u32 keep, u32 set)
56 {
57 	__le16 *parcel = location;
58 	u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
59 
60 	insn &= keep;
61 	insn |= set;
62 
63 	parcel[0] = cpu_to_le16(insn);
64 	parcel[1] = cpu_to_le16(insn >> 16);
65 	return 0;
66 }
67 
68 static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
69 {
70 	__le16 *parcel = location;
71 	u16 insn = le16_to_cpu(*parcel);
72 
73 	insn &= keep;
74 	insn |= set;
75 
76 	*parcel = cpu_to_le16(insn);
77 	return 0;
78 }
79 
80 static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
81 {
82 	if (v != (u32)v) {
83 		pr_err("%s: value %016llx out of range for 32-bit field\n",
84 		       me->name, (long long)v);
85 		return -EINVAL;
86 	}
87 	*(u32 *)location = v;
88 	return 0;
89 }
90 
91 static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
92 {
93 	*(u64 *)location = v;
94 	return 0;
95 }
96 
/*
 * R_RISCV_BRANCH: patch the 12-bit signed PC-relative offset of a
 * conditional branch (B-type) instruction. The offset bits are scattered
 * across the instruction word as noted per line.
 */
static int apply_r_riscv_branch_rela(struct module *me, void *location,
				     Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm12 = (offset & 0x1000) << (31 - 12);	/* offset[12]   -> insn[31]    */
	u32 imm11 = (offset & 0x800) >> (11 - 7);	/* offset[11]   -> insn[7]     */
	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);	/* offset[10:5] -> insn[30:25] */
	u32 imm4_1 = (offset & 0x1e) << (11 - 4);	/* offset[4:1]  -> insn[11:8]  */

	/* 0x1fff07f keeps opcode, funct3 and both register fields. */
	return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}
108 
/*
 * R_RISCV_JAL: patch the 20-bit signed PC-relative offset of a J-type
 * jump instruction. The offset bits are scattered as noted per line.
 */
static int apply_r_riscv_jal_rela(struct module *me, void *location,
				  Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm20 = (offset & 0x100000) << (31 - 20);	/* offset[20]    -> insn[31]    */
	u32 imm19_12 = (offset & 0xff000);		/* offset[19:12] -> insn[19:12] */
	u32 imm11 = (offset & 0x800) << (20 - 11);	/* offset[11]    -> insn[20]    */
	u32 imm10_1 = (offset & 0x7fe) << (30 - 10);	/* offset[10:1]  -> insn[30:21] */

	/* 0xfff keeps only the opcode and rd fields. */
	return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}
120 
/*
 * R_RISCV_RVC_BRANCH: patch the 8-bit signed PC-relative offset of a
 * compressed conditional branch (CB-format) instruction.
 */
static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
					 Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm8 = (offset & 0x100) << (12 - 8);	/* offset[8]   -> insn[12]    */
	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);	/* offset[7:6] -> insn[6:5]   */
	u16 imm5 = (offset & 0x20) >> (5 - 2);		/* offset[5]   -> insn[2]     */
	u16 imm4_3 = (offset & 0x18) << (12 - 5);	/* offset[4:3] -> insn[11:10] */
	u16 imm2_1 = (offset & 0x6) << (12 - 10);	/* offset[2:1] -> insn[4:3]   */

	/* 0xe383 keeps funct3, opcode and the rs1' register field. */
	return riscv_insn_rvc_rmw(location, 0xe383,
			imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}
134 
/*
 * R_RISCV_RVC_JUMP: patch the 11-bit signed PC-relative offset of a
 * compressed jump (CJ-format) instruction.
 */
static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
				       Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u16 imm11 = (offset & 0x800) << (12 - 11);	/* offset[11]  -> insn[12]   */
	u16 imm10 = (offset & 0x400) >> (10 - 8);	/* offset[10]  -> insn[8]    */
	u16 imm9_8 = (offset & 0x300) << (12 - 11);	/* offset[9:8] -> insn[10:9] */
	u16 imm7 = (offset & 0x80) >> (7 - 6);		/* offset[7]   -> insn[6]    */
	u16 imm6 = (offset & 0x40) << (12 - 11);	/* offset[6]   -> insn[7]    */
	u16 imm5 = (offset & 0x20) >> (5 - 2);		/* offset[5]   -> insn[2]    */
	u16 imm4 = (offset & 0x10) << (12 - 5);		/* offset[4]   -> insn[11]   */
	u16 imm3_1 = (offset & 0xe) << (12 - 10);	/* offset[3:1] -> insn[5:3]  */

	/* 0xe003 keeps only funct3 and the opcode. */
	return riscv_insn_rvc_rmw(location, 0xe003,
			imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}
151 
152 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
153 					 Elf_Addr v)
154 {
155 	ptrdiff_t offset = (void *)v - location;
156 
157 	if (!riscv_insn_valid_32bit_offset(offset)) {
158 		pr_err(
159 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
160 		  me->name, (long long)v, location);
161 		return -EINVAL;
162 	}
163 
164 	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
165 }
166 
167 static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
168 					   Elf_Addr v)
169 {
170 	/*
171 	 * v is the lo12 value to fill. It is calculated before calling this
172 	 * handler.
173 	 */
174 	return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
175 }
176 
177 static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
178 					   Elf_Addr v)
179 {
180 	/*
181 	 * v is the lo12 value to fill. It is calculated before calling this
182 	 * handler.
183 	 */
184 	u32 imm11_5 = (v & 0xfe0) << (31 - 11);
185 	u32 imm4_0 = (v & 0x1f) << (11 - 4);
186 
187 	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
188 }
189 
190 static int apply_r_riscv_hi20_rela(struct module *me, void *location,
191 				   Elf_Addr v)
192 {
193 	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
194 		pr_err(
195 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
196 		  me->name, (long long)v, location);
197 		return -EINVAL;
198 	}
199 
200 	return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
201 }
202 
203 static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
204 				     Elf_Addr v)
205 {
206 	/* Skip medlow checking because of filtering by HI20 already */
207 	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
208 	s32 lo12 = ((s32)v - hi20);
209 
210 	return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
211 }
212 
213 static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
214 				     Elf_Addr v)
215 {
216 	/* Skip medlow checking because of filtering by HI20 already */
217 	s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
218 	s32 lo12 = ((s32)v - hi20);
219 	u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
220 	u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
221 
222 	return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
223 }
224 
225 static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
226 				       Elf_Addr v)
227 {
228 	ptrdiff_t offset = (void *)v - location;
229 
230 	/* Always emit the got entry */
231 	if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
232 		offset = (void *)module_emit_got_entry(me, v) - location;
233 	} else {
234 		pr_err(
235 		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
236 		  me->name, (long long)v, location);
237 		return -EINVAL;
238 	}
239 
240 	return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
241 }
242 
243 static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
244 				       Elf_Addr v)
245 {
246 	ptrdiff_t offset = (void *)v - location;
247 	u32 hi20, lo12;
248 
249 	if (!riscv_insn_valid_32bit_offset(offset)) {
250 		/* Only emit the plt entry if offset over 32-bit range */
251 		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
252 			offset = (void *)module_emit_plt_entry(me, v) - location;
253 		} else {
254 			pr_err(
255 			  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
256 			  me->name, (long long)v, location);
257 			return -EINVAL;
258 		}
259 	}
260 
261 	hi20 = (offset + 0x800) & 0xfffff000;
262 	lo12 = (offset - hi20) & 0xfff;
263 	riscv_insn_rmw(location, 0xfff, hi20);
264 	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
265 }
266 
267 static int apply_r_riscv_call_rela(struct module *me, void *location,
268 				   Elf_Addr v)
269 {
270 	ptrdiff_t offset = (void *)v - location;
271 	u32 hi20, lo12;
272 
273 	if (!riscv_insn_valid_32bit_offset(offset)) {
274 		pr_err(
275 		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
276 		  me->name, (long long)v, location);
277 		return -EINVAL;
278 	}
279 
280 	hi20 = (offset + 0x800) & 0xfffff000;
281 	lo12 = (offset - hi20) & 0xfff;
282 	riscv_insn_rmw(location, 0xfff, hi20);
283 	return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
284 }
285 
286 static int apply_r_riscv_relax_rela(struct module *me, void *location,
287 				    Elf_Addr v)
288 {
289 	return 0;
290 }
291 
292 static int apply_r_riscv_align_rela(struct module *me, void *location,
293 				    Elf_Addr v)
294 {
295 	pr_err(
296 	  "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
297 	  me->name, location);
298 	return -EINVAL;
299 }
300 
301 static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
302 {
303 	*(u8 *)location += (u8)v;
304 	return 0;
305 }
306 
307 static int apply_r_riscv_add16_rela(struct module *me, void *location,
308 				    Elf_Addr v)
309 {
310 	*(u16 *)location += (u16)v;
311 	return 0;
312 }
313 
314 static int apply_r_riscv_add32_rela(struct module *me, void *location,
315 				    Elf_Addr v)
316 {
317 	*(u32 *)location += (u32)v;
318 	return 0;
319 }
320 
321 static int apply_r_riscv_add64_rela(struct module *me, void *location,
322 				    Elf_Addr v)
323 {
324 	*(u64 *)location += (u64)v;
325 	return 0;
326 }
327 
328 static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
329 {
330 	*(u8 *)location -= (u8)v;
331 	return 0;
332 }
333 
334 static int apply_r_riscv_sub16_rela(struct module *me, void *location,
335 				    Elf_Addr v)
336 {
337 	*(u16 *)location -= (u16)v;
338 	return 0;
339 }
340 
341 static int apply_r_riscv_sub32_rela(struct module *me, void *location,
342 				    Elf_Addr v)
343 {
344 	*(u32 *)location -= (u32)v;
345 	return 0;
346 }
347 
348 static int apply_r_riscv_sub64_rela(struct module *me, void *location,
349 				    Elf_Addr v)
350 {
351 	*(u64 *)location -= (u64)v;
352 	return 0;
353 }
354 
355 static int dynamic_linking_not_supported(struct module *me, void *location,
356 					 Elf_Addr v)
357 {
358 	pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
359 	       me->name, location);
360 	return -EINVAL;
361 }
362 
363 static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
364 {
365 	pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
366 	       me->name, location);
367 	return -EINVAL;
368 }
369 
370 static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
371 {
372 	u8 *byte = location;
373 	u8 value = v;
374 
375 	*byte = (*byte - (value & 0x3f)) & 0x3f;
376 	return 0;
377 }
378 
379 static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
380 {
381 	u8 *byte = location;
382 	u8 value = v;
383 
384 	*byte = (*byte & 0xc0) | (value & 0x3f);
385 	return 0;
386 }
387 
388 static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
389 {
390 	*(u8 *)location = (u8)v;
391 	return 0;
392 }
393 
394 static int apply_r_riscv_set16_rela(struct module *me, void *location,
395 				    Elf_Addr v)
396 {
397 	*(u16 *)location = (u16)v;
398 	return 0;
399 }
400 
401 static int apply_r_riscv_set32_rela(struct module *me, void *location,
402 				    Elf_Addr v)
403 {
404 	*(u32 *)location = (u32)v;
405 	return 0;
406 }
407 
408 static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
409 				       Elf_Addr v)
410 {
411 	*(u32 *)location = v - (uintptr_t)location;
412 	return 0;
413 }
414 
415 static int apply_r_riscv_plt32_rela(struct module *me, void *location,
416 				    Elf_Addr v)
417 {
418 	ptrdiff_t offset = (void *)v - location;
419 
420 	if (!riscv_insn_valid_32bit_offset(offset)) {
421 		/* Only emit the plt entry if offset over 32-bit range */
422 		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
423 			offset = (void *)module_emit_plt_entry(me, v) - location;
424 		} else {
425 			pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
426 			       me->name, (long long)v, location);
427 			return -EINVAL;
428 		}
429 	}
430 
431 	*(u32 *)location = (u32)offset;
432 	return 0;
433 }
434 
435 static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
436 {
437 	*(long *)location = v;
438 	return 0;
439 }
440 
441 static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
442 {
443 	*(long *)location -= v;
444 	return 0;
445 }
446 
447 static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
448 {
449 	u8 *byte = location;
450 	u8 value = buffer;
451 
452 	if (buffer > 0x3f) {
453 		pr_err("%s: value %ld out of range for 6-bit relocation.\n",
454 		       me->name, buffer);
455 		return -EINVAL;
456 	}
457 
458 	*byte = (*byte & 0xc0) | (value & 0x3f);
459 	return 0;
460 }
461 
462 static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
463 {
464 	if (buffer > U8_MAX) {
465 		pr_err("%s: value %ld out of range for 8-bit relocation.\n",
466 		       me->name, buffer);
467 		return -EINVAL;
468 	}
469 	*(u8 *)location = (u8)buffer;
470 	return 0;
471 }
472 
473 static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
474 {
475 	if (buffer > U16_MAX) {
476 		pr_err("%s: value %ld out of range for 16-bit relocation.\n",
477 		       me->name, buffer);
478 		return -EINVAL;
479 	}
480 	*(u16 *)location = (u16)buffer;
481 	return 0;
482 }
483 
484 static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
485 {
486 	if (buffer > U32_MAX) {
487 		pr_err("%s: value %ld out of range for 32-bit relocation.\n",
488 		       me->name, buffer);
489 		return -EINVAL;
490 	}
491 	*(u32 *)location = (u32)buffer;
492 	return 0;
493 }
494 
495 static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
496 {
497 	*(u64 *)location = (u64)buffer;
498 	return 0;
499 }
500 
static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
{
	/*
	 * ULEB128 is a variable length encoding. Encode the buffer into
	 * the ULEB128 data format.
	 */
	u8 *p = location;

	while (buffer != 0) {
		u8 value = buffer & 0x7f;	/* next 7-bit group */

		buffer >>= 7;
		value |= (!!buffer) << 7;	/* continuation bit if more groups follow */

		*p++ = value;
	}
	/*
	 * NOTE(review): when the final value is 0 no bytes are written, so
	 * the previously-emitted encoding at @location is left untouched --
	 * confirm this is intended for SET/SUB pairs that cancel out.
	 */
	return 0;
}
519 
/*
 * Relocations defined in the riscv-elf-psabi-doc.
 * This handles static linking only.
 *
 * Entries with an accumulate_handler are not applied directly: their
 * reloc_handler operates on a per-location temp buffer, and the
 * accumulate_handler later stores the combined result (with range
 * checking) into the real location. See add_relocation_to_accumulate()
 * and process_accumulated_relocations().
 */
static const struct relocation_handlers reloc_handlers[] = {
	[R_RISCV_32]		= { .reloc_handler = apply_r_riscv_32_rela },
	[R_RISCV_64]		= { .reloc_handler = apply_r_riscv_64_rela },
	[R_RISCV_RELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_COPY]		= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_JUMP_SLOT]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPMOD64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_DTPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL32]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_TLS_TPREL64]	= { .reloc_handler = dynamic_linking_not_supported },
	/* 12-15 undefined */
	[R_RISCV_BRANCH]	= { .reloc_handler = apply_r_riscv_branch_rela },
	[R_RISCV_JAL]		= { .reloc_handler = apply_r_riscv_jal_rela },
	[R_RISCV_CALL]		= { .reloc_handler = apply_r_riscv_call_rela },
	[R_RISCV_CALL_PLT]	= { .reloc_handler = apply_r_riscv_call_plt_rela },
	[R_RISCV_GOT_HI20]	= { .reloc_handler = apply_r_riscv_got_hi20_rela },
	[R_RISCV_TLS_GOT_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TLS_GD_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_PCREL_HI20]	= { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
	[R_RISCV_PCREL_LO12_I]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
	[R_RISCV_PCREL_LO12_S]	= { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
	[R_RISCV_HI20]		= { .reloc_handler = apply_r_riscv_hi20_rela },
	[R_RISCV_LO12_I]	= { .reloc_handler = apply_r_riscv_lo12_i_rela },
	[R_RISCV_LO12_S]	= { .reloc_handler = apply_r_riscv_lo12_s_rela },
	[R_RISCV_TPREL_HI20]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_I]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_LO12_S]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_TPREL_ADD]	= { .reloc_handler = tls_not_supported },
	[R_RISCV_ADD8]		= { .reloc_handler = apply_r_riscv_add8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_ADD16]		= { .reloc_handler = apply_r_riscv_add16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_ADD32]		= { .reloc_handler = apply_r_riscv_add32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_ADD64]		= { .reloc_handler = apply_r_riscv_add64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	[R_RISCV_SUB8]		= { .reloc_handler = apply_r_riscv_sub8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SUB16]		= { .reloc_handler = apply_r_riscv_sub16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SUB32]		= { .reloc_handler = apply_r_riscv_sub32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_SUB64]		= { .reloc_handler = apply_r_riscv_sub64_rela,
				    .accumulate_handler = apply_64_bit_accumulation },
	/* 41-42 reserved for future standard use */
	[R_RISCV_ALIGN]		= { .reloc_handler = apply_r_riscv_align_rela },
	[R_RISCV_RVC_BRANCH]	= { .reloc_handler = apply_r_riscv_rvc_branch_rela },
	[R_RISCV_RVC_JUMP]	= { .reloc_handler = apply_r_riscv_rvc_jump_rela },
	/* 46-50 reserved for future standard use */
	[R_RISCV_RELAX]		= { .reloc_handler = apply_r_riscv_relax_rela },
	[R_RISCV_SUB6]		= { .reloc_handler = apply_r_riscv_sub6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET6]		= { .reloc_handler = apply_r_riscv_set6_rela,
				    .accumulate_handler = apply_6_bit_accumulation },
	[R_RISCV_SET8]		= { .reloc_handler = apply_r_riscv_set8_rela,
				    .accumulate_handler = apply_8_bit_accumulation },
	[R_RISCV_SET16]		= { .reloc_handler = apply_r_riscv_set16_rela,
				    .accumulate_handler = apply_16_bit_accumulation },
	[R_RISCV_SET32]		= { .reloc_handler = apply_r_riscv_set32_rela,
				    .accumulate_handler = apply_32_bit_accumulation },
	[R_RISCV_32_PCREL]	= { .reloc_handler = apply_r_riscv_32_pcrel_rela },
	[R_RISCV_IRELATIVE]	= { .reloc_handler = dynamic_linking_not_supported },
	[R_RISCV_PLT32]		= { .reloc_handler = apply_r_riscv_plt32_rela },
	[R_RISCV_SET_ULEB128]	= { .reloc_handler = apply_r_riscv_set_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	[R_RISCV_SUB_ULEB128]	= { .reloc_handler = apply_r_riscv_sub_uleb128,
				    .accumulate_handler = apply_uleb128_accumulation },
	/* 62-191 reserved for future standard use */
	/* 192-255 nonstandard ABI extensions  */
};
596 
/*
 * Apply and tear down all accumulated relocations, then free the hash
 * table itself. After this returns, *relocation_hashtable and every
 * entry, head and bucket hanging off it have been freed.
 */
static void
process_accumulated_relocations(struct module *me,
				struct hlist_head **relocation_hashtable,
				struct list_head *used_buckets_list)
{
	/*
	 * Only ADD/SUB/SET/ULEB128 should end up here.
	 *
	 * Each bucket may have more than one relocation location. All
	 * relocations for a location are stored in a list in a bucket.
	 *
	 * Relocations are applied to a temp variable before being stored to the
	 * provided location to check for overflow. This also allows ULEB128 to
	 * properly decide how many entries are needed before storing to
	 * location. The final value is stored into location using the handler
	 * for the last relocation to an address.
	 *
	 * Three layers of indexing:
	 *	- Each of the buckets in use
	 *	- Groups of relocations in each bucket by location address
	 *	- Each relocation entry for a location address
	 */
	struct used_bucket *bucket_iter;
	struct used_bucket *bucket_iter_tmp;
	struct relocation_head *rel_head_iter;
	struct hlist_node *rel_head_iter_tmp;
	struct relocation_entry *rel_entry_iter;
	struct relocation_entry *rel_entry_iter_tmp;
	int curr_type;
	void *location;
	long buffer;

	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
				 used_buckets_list, head) {
		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
					  bucket_iter->bucket, node) {
			buffer = 0;
			location = rel_head_iter->location;
			/* Fold every relocation for this location into the temp buffer. */
			list_for_each_entry_safe(rel_entry_iter,
						 rel_entry_iter_tmp,
						 rel_head_iter->rel_entry,
						 head) {
				curr_type = rel_entry_iter->type;
				reloc_handlers[curr_type].reloc_handler(
					me, &buffer, rel_entry_iter->value);
				kfree(rel_entry_iter);
			}
			/*
			 * curr_type is the type of the last entry; every entry
			 * list is non-empty by construction (see
			 * add_relocation_to_accumulate()), so it is always set
			 * here.
			 */
			reloc_handlers[curr_type].accumulate_handler(
				me, location, buffer);
			kfree(rel_head_iter);
		}
		kfree(bucket_iter);
	}

	kfree(*relocation_hashtable);
}
653 
/*
 * Queue one ADD/SUB/SET/ULEB128 relocation for later application by
 * process_accumulated_relocations(). Relocations are grouped per target
 * @location in a hash table; each bucket that gains its first entry is
 * also linked into @used_buckets_list so teardown can skip empty buckets.
 *
 * Returns 0 on success or -ENOMEM, in which case all partial allocations
 * made here have been freed.
 */
static int add_relocation_to_accumulate(struct module *me, int type,
					void *location,
					unsigned int hashtable_bits, Elf_Addr v,
					struct hlist_head *relocation_hashtable,
					struct list_head *used_buckets_list)
{
	struct relocation_entry *entry;
	struct relocation_head *rel_head;
	struct hlist_head *current_head;
	struct used_bucket *bucket;
	unsigned long hash;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->head);
	entry->type = type;
	entry->value = v;

	hash = hash_min((uintptr_t)location, hashtable_bits);

	current_head = &relocation_hashtable[hash];

	/*
	 * Search for the relocation_head for the relocations that happen at the
	 * provided location
	 */
	bool found = false;
	struct relocation_head *rel_head_iter;

	hlist_for_each_entry(rel_head_iter, current_head, node) {
		if (rel_head_iter->location == location) {
			found = true;
			rel_head = rel_head_iter;
			break;
		}
	}

	/*
	 * If there has not yet been any relocations at the provided location,
	 * create a relocation_head for that location and populate it with this
	 * relocation_entry.
	 */
	if (!found) {
		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);

		if (!rel_head) {
			kfree(entry);
			return -ENOMEM;
		}

		rel_head->rel_entry =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);

		if (!rel_head->rel_entry) {
			kfree(entry);
			kfree(rel_head);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(rel_head->rel_entry);
		rel_head->location = location;
		INIT_HLIST_NODE(&rel_head->node);
		/* First head in this bucket: record the bucket as in use. */
		if (!current_head->first) {
			bucket =
				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);

			if (!bucket) {
				kfree(entry);
				kfree(rel_head->rel_entry);
				kfree(rel_head);
				return -ENOMEM;
			}

			INIT_LIST_HEAD(&bucket->head);
			bucket->bucket = current_head;
			list_add(&bucket->head, used_buckets_list);
		}
		hlist_add_head(&rel_head->node, current_head);
	}

	/* Add relocation to head of discovered rel_head */
	list_add_tail(&entry->head, rel_head->rel_entry);

	return 0;
}
742 
743 static unsigned int
744 initialize_relocation_hashtable(unsigned int num_relocations,
745 				struct hlist_head **relocation_hashtable)
746 {
747 	/* Can safely assume that bits is not greater than sizeof(long) */
748 	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
749 	/*
750 	 * When hashtable_size == 1, hashtable_bits == 0.
751 	 * This is valid because the hashing algorithm returns 0 in this case.
752 	 */
753 	unsigned int hashtable_bits = ilog2(hashtable_size);
754 
755 	/*
756 	 * Double size of hashtable if num_relocations * 1.25 is greater than
757 	 * hashtable_size.
758 	 */
759 	int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));
760 
761 	hashtable_bits += should_double_size;
762 
763 	hashtable_size <<= should_double_size;
764 
765 	*relocation_hashtable = kmalloc_array(hashtable_size,
766 					      sizeof(**relocation_hashtable),
767 					      GFP_KERNEL);
768 	if (!*relocation_hashtable)
769 		return 0;
770 
771 	__hash_init(*relocation_hashtable, hashtable_size);
772 
773 	return hashtable_bits;
774 }
775 
776 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
777 		       unsigned int symindex, unsigned int relsec,
778 		       struct module *me)
779 {
780 	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
781 	int (*handler)(struct module *me, void *location, Elf_Addr v);
782 	Elf_Sym *sym;
783 	void *location;
784 	unsigned int i, type;
785 	unsigned int j_idx = 0;
786 	Elf_Addr v;
787 	int res;
788 	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
789 	struct hlist_head *relocation_hashtable;
790 	unsigned int hashtable_bits;
791 	LIST_HEAD(used_buckets_list);
792 
793 	hashtable_bits = initialize_relocation_hashtable(num_relocations,
794 							 &relocation_hashtable);
795 
796 	if (!relocation_hashtable)
797 		return -ENOMEM;
798 
799 	pr_debug("Applying relocate section %u to %u\n", relsec,
800 	       sechdrs[relsec].sh_info);
801 
802 	for (i = 0; i < num_relocations; i++) {
803 		/* This is where to make the change */
804 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
805 			+ rel[i].r_offset;
806 		/* This is the symbol it is referring to */
807 		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
808 			+ ELF_RISCV_R_SYM(rel[i].r_info);
809 		if (IS_ERR_VALUE(sym->st_value)) {
810 			/* Ignore unresolved weak symbol */
811 			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
812 				continue;
813 			pr_warn("%s: Unknown symbol %s\n",
814 				me->name, strtab + sym->st_name);
815 			return -ENOENT;
816 		}
817 
818 		type = ELF_RISCV_R_TYPE(rel[i].r_info);
819 
820 		if (type < ARRAY_SIZE(reloc_handlers))
821 			handler = reloc_handlers[type].reloc_handler;
822 		else
823 			handler = NULL;
824 
825 		if (!handler) {
826 			pr_err("%s: Unknown relocation type %u\n",
827 			       me->name, type);
828 			return -EINVAL;
829 		}
830 
831 		v = sym->st_value + rel[i].r_addend;
832 
833 		if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
834 			unsigned int j = j_idx;
835 			bool found = false;
836 
837 			do {
838 				unsigned long hi20_loc =
839 					sechdrs[sechdrs[relsec].sh_info].sh_addr
840 					+ rel[j].r_offset;
841 				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
842 
843 				/* Find the corresponding HI20 relocation entry */
844 				if (hi20_loc == sym->st_value
845 				    && (hi20_type == R_RISCV_PCREL_HI20
846 					|| hi20_type == R_RISCV_GOT_HI20)) {
847 					s32 hi20, lo12;
848 					Elf_Sym *hi20_sym =
849 						(Elf_Sym *)sechdrs[symindex].sh_addr
850 						+ ELF_RISCV_R_SYM(rel[j].r_info);
851 					unsigned long hi20_sym_val =
852 						hi20_sym->st_value
853 						+ rel[j].r_addend;
854 
855 					/* Calculate lo12 */
856 					size_t offset = hi20_sym_val - hi20_loc;
857 					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
858 					    && hi20_type == R_RISCV_GOT_HI20) {
859 						offset = module_emit_got_entry(
860 							 me, hi20_sym_val);
861 						offset = offset - hi20_loc;
862 					}
863 					hi20 = (offset + 0x800) & 0xfffff000;
864 					lo12 = offset - hi20;
865 					v = lo12;
866 					found = true;
867 
868 					break;
869 				}
870 
871 				j++;
872 				if (j > sechdrs[relsec].sh_size / sizeof(*rel))
873 					j = 0;
874 
875 			} while (j_idx != j);
876 
877 			if (!found) {
878 				pr_err(
879 				  "%s: Can not find HI20 relocation information\n",
880 				  me->name);
881 				return -EINVAL;
882 			}
883 
884 			/* Record the previous j-loop end index */
885 			j_idx = j;
886 		}
887 
888 		if (reloc_handlers[type].accumulate_handler)
889 			res = add_relocation_to_accumulate(me, type, location,
890 							   hashtable_bits, v,
891 							   relocation_hashtable,
892 							   &used_buckets_list);
893 		else
894 			res = handler(me, location, v);
895 		if (res)
896 			return res;
897 	}
898 
899 	process_accumulated_relocations(me, &relocation_hashtable,
900 					&used_buckets_list);
901 
902 	return 0;
903 }
904 
905 int module_finalize(const Elf_Ehdr *hdr,
906 		    const Elf_Shdr *sechdrs,
907 		    struct module *me)
908 {
909 	const Elf_Shdr *s;
910 
911 	s = find_section(hdr, sechdrs, ".alternative");
912 	if (s)
913 		apply_module_alternatives((void *)s->sh_addr, s->sh_size);
914 
915 	return 0;
916 }
917