// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/cpufeature.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

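/*
 * Masks used to strip the NOEXEC bits from page/segment/region table
 * entries on machines without instruction-execution protection; they
 * are initialized in the boot stage (__bootdata_preserved).
 */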
unsigned long __bootdata_preserved(page_noexec_mask);
EXPORT_SYMBOL(page_noexec_mask);

unsigned long __bootdata_preserved(segment_noexec_mask);
EXPORT_SYMBOL(segment_noexec_mask);

unsigned long __bootdata_preserved(region_noexec_mask);
EXPORT_SYMBOL(region_noexec_mask);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

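/*
 * Allocate the block of zero pages that backs ZERO_PAGE(). The order is
 * capped at 7 (512KB with 4KB pages) and scaled down to at most ~0.1%
 * of the estimated free pages (minimum order 2) on small systems.
 * ZERO_PAGE() uses zero_page_mask to pick a zero page based on the
 * virtual address, which spreads accesses across cache colors.
 * memblock_alloc_or_panic() returns zeroed memory, so no explicit
 * clearing is needed.
 */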
static void __init setup_zero_pages(void)
{
	unsigned long total_pages = memblock_estimated_nr_free_pages();
	unsigned int order;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

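/*
 * ZONE_DMA covers memory up to MAX_DMA_ADDRESS (the low 2GB, matching
 * the 31-bit DMA limit set in paging_init()); the rest is ZONE_NORMAL.
 */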
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}

/*
 * paging_init() sets up the page tables and the DMA addressing limit
 * (31 bits, i.e. the low 2GB)
 */
void __init paging_init(void)
{
	vmem_map_init();
	zone_dma_limit = DMA_BIT_MASK(31);
}

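/*
 * Write-protect the __ro_after_init section now that initialization is
 * done. With the NX facility present, also enable instruction-execution
 * protection via the corresponding bit in control register 0.
 */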
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	if (cpu_has_nx())
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

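/*
 * In a protected virtualization guest, "encrypted" means unshared and
 * "decrypted" means shared with the hypervisor: pages backing I/O
 * buffers (swiotlb, DMA allocations) must be shared via the ultravisor
 * page by page, and unshared again before normal reuse.
 */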
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* DMA must be unencrypted, i.e. shared, when running as a protected virtualization guest */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* Protected virtualization: enforce restricted virtio memory access and shared bounce buffers */
static void __init pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

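/*
 * Early MM setup: mark the boot CPU in init_mm's attach and cpu masks,
 * perform protected virtualization setup, and allocate the zero pages.
 */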
void __init arch_mm_preinit(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	pv_init();

	setup_zero_pages();	/* Set up the shared zero pages. */
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size (sclp.rzm).
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

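/*
 * No NUMA topology is exposed here: all CPUs are treated as equidistant
 * and mapped to node 0 for the percpu first-chunk allocator.
 */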
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

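/*
 * Set up the percpu areas with the embed first-chunk allocator and
 * record each CPU's offset from the static percpu section.
 */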
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

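/* Return -EBUSY if the CMA area intersects the [start, end) range being offlined */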
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;

	mem_data = data;

	if (cma_intersects(cma, mem_data->start, mem_data->end))
		return -EBUSY;

	return 0;
}

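/*
 * Veto MEM_GOING_OFFLINE if any CMA area overlaps the affected range,
 * since the CMA region must remain available to its users.
 */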
static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

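/*
 * Hotplug add: create the identity mapping for the new range first, then
 * add the pages; only PAGE_KERNEL protections are supported. The mapping
 * is torn down again if __add_pages() fails.
 */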
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(pgprot_val(params->pgprot) != pgprot_val(PAGE_KERNEL)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

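/* Hotplug remove: take out the pages first, then the identity mapping */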
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

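/*
 * Describe the range used for modules and other executable allocations.
 * With KASLR the base is randomized by 1..1024 pages (up to 4MB with
 * 4KB pages); EXECMEM_KASAN_SHADOW asks execmem to also allocate KASAN
 * shadow for this range.
 */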
struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */
319