/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2010 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	pte = pte_offset_kernel(pmd, addr);
	return pte;
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
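/*
 * page_table_range_init() below pre-populates the intermediate page table
 * levels for a range of kernel virtual addresses, so that later
 * __set_fixmap() calls only need to write PTEs.  As a concrete example,
 * paging_init() further down uses it to cover the fixmap window:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	end   = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 *	page_table_range_init(vaddr, end, swapper_pg_dir);
 */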
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif /* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}
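/*
 * early_reserve_mem() below runs from paging_init() before the bootmem
 * allocator exists.  Roughly, the early memory setup in this file is
 * ordered as follows (sh_mv.mv_mem_init usually resolves to
 * generic_mem_init(), although board machvecs may override it):
 *
 *	paging_init()
 *	    sh_mv.mv_mem_init()		register RAM with memblock
 *	    early_reserve_mem()		reserve kernel image, zero page, etc.
 *	    do_init_bootmem()
 *	        allocate_pgdat(0), plat_mem_setup()
 *	        bootmem_init_one_node() for each online node
 *	        sparse_init()
 */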
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}
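/*
 * Worked example for early_reserve_mem() above, using made-up values:
 * assuming __MEMORY_START is 0x0c000000 and CONFIG_ZERO_PAGE_OFFSET is
 * 0x1000, the two memblock_reserve() calls take out
 *
 *	[0x0c000000, 0x0c001000)	the zero page area
 *	[0x0c001000, ...]		the kernel image, through the first
 *					page boundary at or above _end
 *
 * so neither range is later handed out by the bootmem/page allocators.
 */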
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();

	sh_mv.mv_mem_init();

	early_reserve_mem();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
	       "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

	       (unsigned long)VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (unsigned long)memory_start, (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
	       uncached_start, uncached_end, uncached_size >> 20,
#endif

	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10,

	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}
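/*
 * The helpers below follow the usual pattern for returning boot-only pages
 * to the page allocator: clear PG_reserved, reset the refcount with
 * init_page_count(), free_page() the address, and account the page in
 * totalram_pages.
 */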
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */