xref: /linux/arch/loongarch/kernel/relocate.c (revision ff57d59200baadfdb41f94a49fed7d161a9a8124)
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Kernel relocation at boot time
 *
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/panic_notifier.h>
#include <linux/start_kernel.h>
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>

#define RELOCATED(x) ((void *)((long)x + reloc_offset))
#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))

static unsigned long reloc_offset;

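/*
 * Apply the R_LARCH_RELATIVE relocations recorded in .rela.dyn: each entry
 * names a location (r_offset) that must be rewritten to hold its link-time
 * value (r_addend) adjusted by the runtime offset of the kernel image.
 */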
static inline void __init relocate_relative(void)
{
	Elf64_Rela *rela, *rela_end;
	rela = (Elf64_Rela *)&__rela_dyn_begin;
	rela_end = (Elf64_Rela *)&__rela_dyn_end;

	for ( ; rela < rela_end; rela++) {
		Elf64_Addr addr = rela->r_offset;
		Elf64_Addr relocated_addr = rela->r_addend;

		if (rela->r_info != R_LARCH_RELATIVE)
			continue;

		relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
		*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
	}

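	/*
	 * With CONFIG_RELR the remaining relative relocations are stored in
	 * the packed RELR format: an entry with bit 0 clear is the address
	 * of the next word to relocate; an entry with bit 0 set is a bitmap
	 * whose bits 1..63 select which of the next 63 words also need
	 * reloc_offset added.
	 */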
#ifdef CONFIG_RELR
	u64 *addr = NULL;
	u64 *relr = (u64 *)&__relr_dyn_begin;
	u64 *relr_end = (u64 *)&__relr_dyn_end;

	for ( ; relr < relr_end; relr++) {
		if ((*relr & 1) == 0) {
			addr = (u64 *)(*relr + reloc_offset);
			*addr++ += reloc_offset;
		} else {
			for (u64 *p = addr, r = *relr >> 1; r; p++, r >>= 1)
				if (r & 1)
					*p += reloc_offset;
			addr += 63;
		}
	}
#endif
}

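/*
 * Fix up absolute address loads.  Every la.abs site is recorded in the
 * __la_abs table as a { pc, symvalue } pair; rewrite the immediates of the
 * lu12i.w/ori (and, on 64-bit, lu32i.d/lu52i.d) instructions at pc so that
 * the sequence loads the relocated symbol value.
 */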
static inline void __init relocate_absolute(long random_offset)
{
	void *begin, *end;
	struct rela_la_abs *p;

	begin = RELOCATED_KASLR(&__la_abs_begin);
	end   = RELOCATED_KASLR(&__la_abs_end);

	for (p = begin; (void *)p < end; p++) {
		long v = p->symvalue;
		uint32_t lu12iw, ori;
#ifdef CONFIG_64BIT
		uint32_t lu32id, lu52id;
#endif
		union loongarch_instruction *insn = (void *)p->pc;

		lu12iw = (v >> 12) & 0xfffff;
		ori    = v & 0xfff;
#ifdef CONFIG_64BIT
		lu32id = (v >> 32) & 0xfffff;
		lu52id = v >> 52;
#endif

		insn[0].reg1i20_format.immediate = lu12iw;
		insn[1].reg2i12_format.immediate = ori;
#ifdef CONFIG_64BIT
		insn[2].reg1i20_format.immediate = lu32id;
		insn[3].reg2i12_format.immediate = lu52id;
#endif
	}
}

#ifdef CONFIG_RANDOMIZE_BASE
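/* Fold an arbitrary memory area into @hash, one unsigned long at a time. */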
static inline __init unsigned long rotate_xor(unsigned long hash,
					      const void *area, size_t size)
{
	size_t i, diff;
	const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));

	diff = (void *)ptr - area;
	if (size < diff + sizeof(hash))
		return hash;

	size = ALIGN_DOWN(size - diff, sizeof(hash));

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

static inline __init unsigned long get_random_boot(void)
{
	unsigned long hash = 0;
	unsigned long entropy = random_get_entropy();

	/* Attempt to create a simple but unpredictable starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));

	/* Add in any runtime entropy we can get */
	hash = rotate_xor(hash, &entropy, sizeof(entropy));

	return hash;
}

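/*
 * "nokaslr" is acted upon in kaslr_disabled() long before early parameters
 * are parsed; this handler only keeps the option from being reported as an
 * unknown parameter.
 */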
static int __init nokaslr(char *p)
{
	return 0; /* Just silence the boot warning */
}
early_param("nokaslr", nokaslr);

#define KASLR_DISABLED_MESSAGE "KASLR is disabled by %s in %s cmdline.\n"

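/*
 * KASLR is skipped when it is explicitly disabled ("nokaslr"), when the
 * kernel may have to resume from hibernation ("resume=" given and not
 * overridden by "nohibernate"/"noresume"), or when "kexec_file" appears on
 * the bootloader command line.  Both the built-in and the bootloader
 * command lines are checked.
 */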
static inline __init bool kaslr_disabled(void)
{
	char *str;
	const char *builtin_cmdline = CONFIG_CMDLINE;

	str = strstr(builtin_cmdline, "nokaslr");
	if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) {
		pr_info(KASLR_DISABLED_MESSAGE, "\'nokaslr\'", "built-in");
		return true;
	}

	str = strstr(boot_command_line, "nokaslr");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) {
		pr_info(KASLR_DISABLED_MESSAGE, "\'nokaslr\'", "bootloader");
		return true;
	}

#ifdef CONFIG_HIBERNATION
	str = strstr(builtin_cmdline, "nohibernate");
	if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
		return false;

	str = strstr(boot_command_line, "nohibernate");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
		return false;

	str = strstr(builtin_cmdline, "noresume");
	if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
		return false;

	str = strstr(boot_command_line, "noresume");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
		return false;

	str = strstr(builtin_cmdline, "resume=");
	if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) {
		pr_info(KASLR_DISABLED_MESSAGE, "\'resume=\'", "built-in");
		return true;
	}

	str = strstr(boot_command_line, "resume=");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) {
		pr_info(KASLR_DISABLED_MESSAGE, "\'resume=\'", "bootloader");
		return true;
	}
#endif

	str = strstr(boot_command_line, "kexec_file");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) {
		pr_info(KASLR_DISABLED_MESSAGE, "\'kexec_file\'", "bootloader");
		return true;
	}

	return false;
}

/* Choose a new address for the kernel */
static inline void __init *determine_relocation_address(void)
{
	unsigned long kernel_length;
	unsigned long random_offset;
	void *destination = _text;

	if (kaslr_disabled())
		return destination;

	kernel_length = (unsigned long)_end - (unsigned long)_text;

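	/*
	 * Pick a 64KB-aligned random offset below
	 * CONFIG_RANDOMIZE_BASE_MAX_OFFSET; if it is smaller than the image
	 * itself, push it past the image so the new location cannot overlap
	 * the currently running kernel.
	 */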
	random_offset = get_random_boot() << 16;
	random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
	if (random_offset < kernel_length)
		random_offset += ALIGN(kernel_length, 0xffff);

	return RELOCATED_KASLR(destination);
}

static inline int __init relocation_addr_valid(void *location_new)
{
	if ((unsigned long)location_new & 0x00000ffff)
		return 0; /* Inappropriately aligned new location */

	if ((unsigned long)location_new < (unsigned long)_end)
		return 0; /* New location overlaps original kernel */

	return 1;
}
#endif

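/*
 * reloc_offset is still being updated while we execute from the old image,
 * so propagate its final value into the copy of the variable that lives in
 * the relocated image; that copy is the one seen after the jump there.
 */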
static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
{
	unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);

	*new_addr = (unsigned long)reloc_offset;
}

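/*
 * Called from the early boot entry code, before start_kernel().  Applies
 * the load-address relocations (plus, with CONFIG_RANDOMIZE_BASE, a random
 * displacement) and returns the KASLR offset so the caller can continue in
 * the relocated image.
 */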
unsigned long __init relocate_kernel(void)
{
	unsigned long kernel_length;
	unsigned long random_offset = 0;
	void *location_new = _text; /* Default to original kernel start */
	char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */

	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);

#ifdef CONFIG_RANDOMIZE_BASE
	location_new = determine_relocation_address();

	/* Sanity check relocation address */
	if (relocation_addr_valid(location_new))
		random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
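	/* Offset between where the bootloader placed us and the link-time address */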
	reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
	early_memunmap(cmdline, COMMAND_LINE_SIZE);

	if (random_offset) {
		kernel_length = (unsigned long)(_end) - (unsigned long)(_text);

		/* Copy the kernel to its new location */
		memcpy(location_new, _text, kernel_length);

		/* Sync the caches ready for execution of new kernel */
		__asm__ __volatile__ (
			"ibar 0 \t\n"
			"dbar 0 \t\n"
			::: "memory");

		reloc_offset += random_offset;

		/* The current thread is now within the relocated kernel */
		__current_thread_info = RELOCATED_KASLR(__current_thread_info);

		update_reloc_offset(&reloc_offset, random_offset);
	}

	if (reloc_offset)
		relocate_relative();

	relocate_absolute(random_offset);

	return random_offset;
}

/*
 * Show relocation information on panic.
 */
static void show_kernel_relocation(const char *level)
{
	if (reloc_offset > 0) {
		printk(level);
		pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
		pr_cont(" .text @ 0x%px\n", _text);
		pr_cont(" .data @ 0x%px\n", _sdata);
		pr_cont(" .bss  @ 0x%px\n", __bss_start);
	}
}

static int kernel_location_notifier_fn(struct notifier_block *self,
				       unsigned long v, void *p)
{
	show_kernel_relocation(KERN_EMERG);
	return NOTIFY_DONE;
}

static struct notifier_block kernel_location_notifier = {
	.notifier_call = kernel_location_notifier_fn
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_location_notifier);
	return 0;
}

arch_initcall(register_kernel_offset_dumper);