/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* primary ASCE (CR1) */
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);	/* secondary ASCE (CR7) */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* home space ASCE (CR13) */
	/* Load a PSW mask with the DAT bit set to turn on address translation. */
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
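
/*
 * Context for the CONFIG_DEBUG_PAGEALLOC hook below (illustrative comment,
 * not part of the original file): the page allocator calls
 * kernel_map_pages(page, 1 << order, 0) when pages are freed and
 * kernel_map_pages(page, 1 << order, 1) when they are allocated again, so
 * freed pages are unmapped and any stray access to them faults immediately.
 */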

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			/* Unmap the page so a stray access faults at once. */
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		/* Re-establish a writable kernel mapping for the page. */
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (begin >= end)
		return;
	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)&__init_begin,
			(unsigned long)&__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	/* Map the new range into the kernel address space first ... */
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	/* ... then hand the pages to the memory hotplug core. */
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
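
/*
 * Illustrative sketch (not part of the original file): how the generic
 * memory hotplug core would reach arch_add_memory() above. add_memory()
 * lives in mm/memory_hotplug.c and calls into the architecture hook; the
 * node id, start address and size used here are hypothetical. Kept under
 * #if 0 so the file still builds as-is.
 */
#if 0
static int __init memory_hotplug_example(void)
{
	u64 start = 0x20000000ULL;	/* hypothetical standby memory start */
	u64 size = 256UL << 20;		/* hypothetical size: 256 MB */

	/* add_memory() sets up the node and ends up in arch_add_memory(). */
	return add_memory(0, start, size);
}
#endif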