/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
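
/*
 * The initial kernel page table, used by init_mm. It is page aligned
 * because the hardware ASCE in the control registers points at it.
 */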
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
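
/*
 * Set up a block of "colored" empty zero pages. Instead of a single
 * shared zero page, 1 << order zero pages are allocated and ZERO_PAGE()
 * selects one of them based on the virtual address, which spreads read
 * faults of the zero page across different cache colors.
 */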
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
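	/*
	 * Pick the allocation order, and thus the number of zero pages,
	 * to match the cache characteristics of the machine generation:
	 * newer machines with larger caches get more zero page colors.
	 */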
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	default:
		order = 5;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	if (order > 2 && totalram_pages <= 16384)
		order = 2;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");
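
	/*
	 * Split the higher-order allocation into individual pages and mark
	 * each of them reserved, so the buddy allocator will never hand
	 * the zero pages out again.
	 */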
	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}
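
	/*
	 * zero_page_mask is the page-aligned offset mask that ZERO_PAGE()
	 * uses to pick one of the 1 << order zero pages for a given
	 * virtual address.
	 */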
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables: it initializes
 * swapper_pg_dir, derives the kernel ASCE from it, enables DAT and
 * hands the zone limits to the memory allocator.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
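	/*
	 * A region-third table covers 4 TB (1UL << 42) of address space;
	 * if the kernel mapping must extend beyond that, a region-second
	 * table, which covers 8 PB, is needed.
	 */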
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/*
	 * Enable virtual mapping in kernel mode: load the kernel ASCE into
	 * the primary (cr1), secondary (cr7) and home (cr13) address space
	 * control registers.
	 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
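	/*
	 * Load a PSW system mask with just the DAT bit set (0x04 in the
	 * leftmost byte): this switches to dynamic address translation
	 * with interrupts still disabled.
	 */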
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
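	/*
	 * s390 has two zones: ZONE_DMA for memory below 2 GB
	 * (MAX_DMA_ADDRESS), the limit for 31-bit capable I/O, and
	 * ZONE_NORMAL for everything above it.
	 */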
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
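
/*
 * mem_init() releases all bootmem to the buddy allocator, sets up the
 * zero pages and reports the final memory layout.
 */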
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
	printk(KERN_INFO "Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
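
/*
 * Free the memory occupied by the kernel's __init sections back to the
 * page allocator.
 */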
void free_initmem(void)
{
	free_initmem_default(0);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
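/*
 * Create the identity mapping for the hot-added memory range, then feed
 * its page frames to the page allocator: ranges inside an existing
 * zone's limits stay in that zone, anything left over goes to
 * ZONE_MOVABLE.
 */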
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */