// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

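/*
 * Allocate and reserve a block of zeroed pages to back empty_zero_page.
 * ZERO_PAGE() uses zero_page_mask to pick one of these pages based on
 * the virtual address, spreading heavy zero-page reads across several
 * pages instead of a single cache-hot one.
 */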
static void __init setup_zero_pages(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	vmem_map_init();
	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

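/*
 * Write protect the data marked __ro_after_init, which must no longer
 * be modified once the init sequence has completed.
 */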
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

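/*
 * Late memory init: set up protected virtualization support, release all
 * memblock-managed memory to the buddy allocator and allocate the empty
 * zero pages.
 */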
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}

void free_initmem(void)
{
	set_memory_rwnx((unsigned long)_sinittext,
			(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than
	 * or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

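/*
 * There is no NUMA on s390: for the percpu first-chunk allocator all CPUs
 * are equidistant and mapped to node 0.
 */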
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

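/* Return -EBUSY if the CMA area overlaps the range that is going offline */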
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

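/*
 * Memory hotplug: add the new range to the kernel's identity mapping first,
 * then hand the page range over to the core mm.  The mapping is torn down
 * again if __add_pages() fails.
 */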
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

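/*
 * Describe the range used for module and other executable allocations.
 * With KASLR the start of the area is shifted by a random, page-aligned
 * offset of up to 4 MB below MODULES_END.
 */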
struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */