/* /linux/arch/s390/boot/vmem.c (revision 78f608d7aff05c245bf0aab00ce7273a7d9f04b9) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

struct ctlreg __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

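/*
 * populate_mode tells _pa() and the pgtable_*_populate() helpers how a
 * virtual range is backed: not at all (NONE, only the paging structures
 * are created), 1:1 (DIRECT), or through the identity, kernel or
 * absolute-lowcore offsets. The KASAN modes map freshly allocated
 * shadow memory, reuse the shared zero shadow, or stop at the top
 * level (SHALLOW).
 */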
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
	POPULATE_IDENTITY,
	POPULATE_KERNEL,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

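/*
 * The decompressor is built without the kernel's KASAN symbols, so the
 * early shadow objects are resolved through their offsets into the
 * uncompressed vmlinux image. __sha() translates an address to its
 * shadow address via the generic kasan_mem_to_shadow(), i.e.
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 */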
#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

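/*
 * Populate the shadow for everything that can be addressed: real shadow
 * pages for usable physical memory and the kernel image, zero shadow
 * for memory gaps, the lowcore alias, amode31 and untracked memory, and
 * a shallow (top-level only) population for vmalloc/modules when
 * CONFIG_KASAN_VMALLOC is enabled.
 */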
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long memgap_start = 0;
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
	__arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pte, 1);

	for_each_physmem_usable_range(i, &start, &end) {
		kasan_populate((unsigned long)__identity_va(start),
			       (unsigned long)__identity_va(end),
			       POPULATE_KASAN_MAP_SHADOW);
		if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260) {
			kasan_populate((unsigned long)__identity_va(memgap_start),
				       (unsigned long)__identity_va(start),
				       POPULATE_KASAN_ZERO_SHADOW);
		}
		memgap_start = end;
	}
	kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
	kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
		       POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

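/*
 * The kasan_*_populate_zero_shadow() helpers hook into the generic
 * populate loops below: whenever a properly aligned pgd/p4d/pud/pmd
 * sized chunk is to be covered by zero shadow, the entry is wired to
 * the shared early shadow table one level down instead of allocating
 * a private table.
 */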
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else

static inline void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() in the absence of the init_mm symbol.
 * Unlike virt_to_kpte(), skip the pmd NULL check.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

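/*
 * Allocate a region/segment (CRST) table from the RR_VMEM physmem
 * reservation, initialize all entries with @val and mark its pages
 * for DAT use.
 */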
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * A 2KB page table fits twice into a 4KB page. Hand out the upper
	 * half first and keep the lower half as leftover for the next call.
	 * Handling the leftover this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
		__arch_set_page_dat(pte, 1);
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

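/*
 * Translate @addr to the physical address that backs it under the given
 * populate mode. POPULATE_KASAN_MAP_SHADOW allocates fresh, zeroed
 * shadow memory instead of translating.
 */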
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
	case POPULATE_KERNEL:
		return __kernel_pa(addr);
	case POPULATE_IDENTITY:
		return __identity_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

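/*
 * Large leaf entries (1M segments with EDAT1, 2G regions with EDAT2)
 * are only used for the direct and identity mappings, and only when
 * the hardware provides the corresponding facility.
 */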
static bool large_allowed(enum populate_mode mode)
{
	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
{
	return machine.has_edat2 && large_allowed(mode) &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
{
	return machine.has_edat1 && large_allowed(mode) &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}

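/*
 * The pgtable_*_populate() helpers below each walk one level of the
 * page table hierarchy, allocate the next lower level on demand and
 * install leaf entries of the largest size the populate mode and the
 * hardware allow. Already populated (leaf) entries are left alone.
 */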
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next, mode)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next, mode)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

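/*
 * Create the boot-time page tables: the lowcore and identity mappings
 * of physical memory, the kernel image mapping, amode31, the absolute
 * lowcore and memcpy_real areas and the KASAN shadow, then install the
 * kernel ASCE in the control registers.
 */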
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	pgd_t *init_mm_pgd;
	int i;

	/*
	 * Mark the whole of memory as no-dat. This must be done before
	 * any page tables are allocated, otherwise pages that are built
	 * into the kernel image would be marked as dat tables.
	 */
	for_each_physmem_online_range(i, &start, &end)
		__arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);

	/*
	 * init_mm->pgd contains the virtual address of swapper_pg_dir.
	 * It is unusable at this stage, since DAT is still off. Swap it
	 * for the physical address of swapper_pg_dir and restore the
	 * virtual address after all page tables have been created.
	 */
	init_mm_pgd = init_mm.pgd;
	init_mm.pgd = (pgd_t *)swapper_pg_dir;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);

	/*
	 * To allow prefixing, the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0, map the
	 * lowcore first and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end) {
		pgtable_populate((unsigned long)__identity_va(start),
				 (unsigned long)__identity_va(end),
				 POPULATE_IDENTITY);
	}
	pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
	pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));

	kasan_populate_shadow(kernel_start, kernel_end);

	S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

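	/*
	 * Control register 1 holds the primary ASCE, control register 7
	 * the secondary and control register 13 the home space ASCE.
	 * User space stays on the invalid ASCE until a proper user ASCE
	 * is installed later.
	 */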
	local_ctl_load(1, &S390_lowcore.kernel_asce);
	local_ctl_load(7, &S390_lowcore.user_asce);
	local_ctl_load(13, &S390_lowcore.kernel_asce);

	init_mm.context.asce = S390_lowcore.kernel_asce.val;
	init_mm.pgd = init_mm_pgd;
}