/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	/* First byte (0x04) is the PSW DAT bit; loading it via ssm
	 * below turns on dynamic address translation. */
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	/* Initialize all 2048 top-level table entries as empty. */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* Enable virtual mapping in kernel mode: load the kernel ASCE
	 * into the primary (cr1), secondary (cr7) and home (cr13)
	 * address space control registers. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Set up guest page hinting (collaborative memory management) */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
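	/* Report the range that was mapped read-only (see
	 * vmem_map_init()): kernel text and shared read-only data
	 * from _stext up to _eshared. */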
117 printk("Write protected kernel read-only data: %#lx - %#lx\n", 118 (unsigned long)&_stext, 119 PFN_ALIGN((unsigned long)&_eshared) - 1); 120 } 121 122 #ifdef CONFIG_DEBUG_PAGEALLOC 123 void kernel_map_pages(struct page *page, int numpages, int enable) 124 { 125 pgd_t *pgd; 126 pud_t *pud; 127 pmd_t *pmd; 128 pte_t *pte; 129 unsigned long address; 130 int i; 131 132 for (i = 0; i < numpages; i++) { 133 address = page_to_phys(page + i); 134 pgd = pgd_offset_k(address); 135 pud = pud_offset(pgd, address); 136 pmd = pmd_offset(pud, address); 137 pte = pte_offset_kernel(pmd, address); 138 if (!enable) { 139 ptep_invalidate(&init_mm, address, pte); 140 continue; 141 } 142 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); 143 /* Flush cpu write queue. */ 144 mb(); 145 } 146 } 147 #endif 148 149 void free_init_pages(char *what, unsigned long begin, unsigned long end) 150 { 151 unsigned long addr = begin; 152 153 if (begin >= end) 154 return; 155 for (; addr < end; addr += PAGE_SIZE) { 156 ClearPageReserved(virt_to_page(addr)); 157 init_page_count(virt_to_page(addr)); 158 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, 159 PAGE_SIZE); 160 free_page(addr); 161 totalram_pages++; 162 } 163 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 164 } 165 166 void free_initmem(void) 167 { 168 free_init_pages("unused kernel memory", 169 (unsigned long)&__init_begin, 170 (unsigned long)&__init_end); 171 } 172 173 #ifdef CONFIG_BLK_DEV_INITRD 174 void free_initrd_mem(unsigned long start, unsigned long end) 175 { 176 free_init_pages("initrd memory", start, end); 177 } 178 #endif 179 180 #ifdef CONFIG_MEMORY_HOTPLUG 181 int arch_add_memory(int nid, u64 start, u64 size) 182 { 183 struct pglist_data *pgdat; 184 struct zone *zone; 185 int rc; 186 187 pgdat = NODE_DATA(nid); 188 zone = pgdat->node_zones + ZONE_MOVABLE; 189 rc = vmem_add_mapping(start, size); 190 if (rc) 191 return rc; 192 rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size)); 193 if (rc) 194 vmem_remove_mapping(start, size); 195 return rc; 196 } 197 #endif /* CONFIG_MEMORY_HOTPLUG */ 198