// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)
{
	int node = fdt_path_offset(fdt, "/chosen");

	early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
}

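/*
 * Fold a memory area into @hash: rotate the running hash right by an odd
 * number of bits and XOR in one unsigned long at a time.
 */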
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/*
 * Attempt to create simple starting entropy. This makes the hash different
 * for every build, but that alone is not enough. Stronger entropy should
 * be added to make it change on every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

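/*
 * Read the /chosen/kaslr-seed property, then zero it in the FDT so the
 * same seed cannot be observed (or reused) later.
 */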
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

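/*
 * Ranges are treated as inclusive: [s1, e1] and [s2, e2] overlap
 * unless one ends before the other begins.
 */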
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

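/*
 * Check a candidate range against both the /memreserve/ map and any
 * static reservations under the /reserved-memory node.
 */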
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		while (len >= (regions.reserved_mem_addr_cells +
			       regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

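/*
 * A candidate range is unusable if it hits the currently running kernel,
 * the DTB, the initrd, the crashkernel region or any FDT reservation.
 */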
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

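/*
 * Record the crashkernel region requested on the command line so the
 * randomized kernel will not be placed on top of it.
 */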
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

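/*
 * Record the physical range of the initrd from the linux,initrd-start
 * and linux,initrd-end properties of /chosen.
 */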
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

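/*
 * Walk down from @offset towards @start in SZ_16K steps and return the
 * first address where a kernel of regions.kernel_size bytes does not
 * overlap anything that must be preserved, or 0 if none is found.
 */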
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

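/*
 * Try the randomly chosen 64M slot first; if no usable address is found
 * there, fall back to lower slots until one works. The result is
 * returned as an offset relative to memstart_addr.
 */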
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

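/*
 * Mix several entropy sources into one random value, then use it to pick
 * a 64M slot in the linear mapping and a 16K-aligned offset inside it.
 */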
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

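	/*
	 * Mix in the timebase. Early in boot only its low bits vary, so
	 * this is weak entropy on its own.
	 */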
	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

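	/*
	 * Ask map_mem_in_cams() (dry run) how much of low memory the TLB
	 * CAM entries can cover, then cap the candidate window at 512M.
	 */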
	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M slot to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide offset inside 64M */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Decide whether the kernel needs to be relocated to a random offset.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run from there */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

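/*
 * Called later in boot: scrub the copy of the kernel left behind at the
 * original, non-randomized base address.
 */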
void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}