// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/cpufeature.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

unsigned long __bootdata_preserved(page_noexec_mask);
EXPORT_SYMBOL(page_noexec_mask);

unsigned long __bootdata_preserved(segment_noexec_mask);
EXPORT_SYMBOL(segment_noexec_mask);

unsigned long __bootdata_preserved(region_noexec_mask);
EXPORT_SYMBOL(region_noexec_mask);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

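/*
 * Allocate a physically contiguous block of zero pages and derive
 * zero_page_mask from its size; ZERO_PAGE() uses the mask to spread
 * read-only zero-page mappings across the block.
 */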
static void __init setup_zero_pages(void)
{
	unsigned long total_pages = memblock_estimated_nr_free_pages();
	unsigned int order;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and initializes the
 * memory zones.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	vmem_map_init();
	sparse_init();
	zone_dma_limit = DMA_BIT_MASK(31);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

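/*
 * Write-protect the ro_after_init data and, if the machine provides the
 * instruction-execution-protection facility, enable it via control
 * register 0 so non-executable mappings are enforced.
 */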
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	if (cpu_has_nx())
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

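/*
 * For protected virtualization guests, "encrypted" means not shared
 * with the hypervisor: set_memory_encrypted() withdraws pages from
 * sharing via the ultravisor, while set_memory_decrypted() shares
 * them. Both operate one page at a time.
 */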
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void __init pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

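/*
 * Early memory-management setup: attach the boot CPU to init_mm, then
 * initialize protected virtualization support and the zero pages.
 */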
void __init arch_mm_preinit(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	pv_init();

	setup_zero_pages();	/* Setup zeroed pages. */
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

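/*
 * The percpu first chunk does not use NUMA information on s390: all
 * CPUs are reported as equidistant and mapped to node 0.
 */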
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

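/*
 * Set up the percpu areas with the embed first-chunk allocator and
 * record each CPU's offset from the static percpu section.
 */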
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain CMA regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

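/*
 * cma_for_each_area() callback: report -EBUSY if the CMA area
 * intersects the range that is about to go offline.
 */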
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;

	mem_data = data;

	if (cma_intersects(cma, mem_data->start, mem_data->end))
		return -EBUSY;

	return 0;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

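/*
 * Hot-add memory: create the identity mapping first, then add the
 * struct pages. The mapping is torn down again if __add_pages() fails.
 * Only PAGE_KERNEL protections are supported.
 */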
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(pgprot_val(params->pgprot) != pgprot_val(PAGE_KERNEL)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

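/*
 * Describe the module/execmem area. With KASLR enabled the load base
 * is randomized by up to 1024 pages above MODULES_VADDR.
 */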
struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags = EXECMEM_KASAN_SHADOW,
				.start = start,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */