xref: /linux/arch/powerpc/mm/nohash/kaslr_booke.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_reserve.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>

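/*
 * Constraints for choosing a random base: the searched part of the linear
 * mapping, the kernel image size, and the regions that must not be
 * overwritten (DTB, initrd, crashkernel reservation, /reserved-memory).
 */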
struct regions {
	unsigned long pa_start;
	unsigned long pa_end;
	unsigned long kernel_size;
	unsigned long dtb_start;
	unsigned long dtb_end;
	unsigned long initrd_start;
	unsigned long initrd_end;
	unsigned long crash_start;
	unsigned long crash_end;
	int reserved_mem;
	int reserved_mem_addr_cells;
	int reserved_mem_size_cells;
};

struct regions __initdata regions;

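/* Pull the kernel command line out of the FDT into boot_command_line. */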
static __init void kaslr_get_cmdline(void *fdt)
{
	early_init_dt_scan_chosen(boot_command_line);
}

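/*
 * Fold 'size' bytes at 'area' into 'hash', one unsigned long at a time,
 * rotating the hash between words.
 */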
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
				       size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by an odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/*
 * Attempt to create a simple starting entropy. This makes it different for
 * every build, but it is still not enough; stronger entropy should be added
 * to make it change on every boot.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	/* Build-specific string for starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}

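/*
 * Fetch /chosen/kaslr-seed from the FDT and wipe it so the value cannot
 * be read back later. Returns 0 if the property is missing or malformed.
 */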
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

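/* True when the ranges [s1, e1] and [s2, e2] intersect. */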
static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
{
	return e1 >= s2 && e2 >= s1;
}

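/*
 * Check a candidate range against the reservations described in the FDT:
 * the /memreserve/ block and any static child of /reserved-memory.
 */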
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		while (len >= (regions.reserved_mem_addr_cells +
			       regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			if (base >= regions.pa_end)
				continue;

			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}

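/*
 * Check a candidate range against everything that must survive the copy:
 * the running kernel image, the DTB, the initrd, the crashkernel area and
 * the FDT-described reservations.
 */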
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}

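/*
 * Record the crashkernel reservation, if any, so the randomized kernel
 * does not land inside it.
 */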
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_RESERVE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base, NULL, NULL);
	if (ret != 0 || crash_size == 0)
		return;
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}

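/* Record the initrd range advertised under /chosen. */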
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

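/*
 * Scan downwards from 'offset' in 16K steps for a kernel-sized window
 * that does not overlap any protected region. Returns 0 when nothing
 * usable is found above 'start'.
 */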
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}

static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}

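/*
 * Turn the random (index, offset) pair into an offset from memstart_addr,
 * falling back to lower 64M regions until a usable address is found.
 */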
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	while ((long)index >= 0) {
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}

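/* "nokaslr" on the command line turns base randomization off. */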
static inline __init bool kaslr_disabled(void)
{
	return strstr(boot_command_line, "nokaslr") != NULL;
}

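/*
 * Mix the available entropy sources (kernel banner, FDT image, timebase,
 * /chosen/kaslr-seed), then pick a random 64M region inside the linear
 * mapping and a 16K-aligned offset within it that avoids every recorded
 * region.
 */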
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M region to start in.
	 * Only use the low 8 bits of the random seed.
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside the 64M region */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}

/*
 * Decide whether the kernel needs to be relocated to a random offset and,
 * if so, copy it there and jump to the relocated image.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create a kernel mapping to relocate into */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}

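/*
 * Called later in boot: if the kernel was relocated, scrub the stale copy
 * left behind at KERNELBASE.
 */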
void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}