/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2010  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}
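/*
 * Pre-populate the kernel page tables for the range [start, end)
 * under pgd_base, allocating pmd and pte pages from bootmem as
 * needed. paging_init() uses this to cover the fixmap region; the
 * individual ptes are left empty and filled in later through
 * __set_fixmap().
 */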
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;
	int i;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		/* Reserve the sections we're already using. */
		for (i = 0; i < memblock.reserved.cnt; i++)
			reserve_bootmem(memblock.reserved.region[i].base,
					memblock_size_bytes(&memblock.reserved, i),
					BOOTMEM_DEFAULT);
	}

	sparse_memory_present_with_active_regions(nid);
}
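/*
 * Transfer the memory layout that memblock has accumulated over to
 * the bootmem allocator: register each memblock region as an active
 * range, bring up node 0 (where all RAM lives in the non-NUMA case),
 * give the platform setup hook a chance to run, and then initialize
 * bootmem on every online node.
 */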
static void __init do_init_bootmem(void)
{
	int i;

	/* Add active regions with valid PFNs. */
	for (i = 0; i < memblock.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}
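/*
 * Set up the kernel's view of memory: run the memblock/bootmem
 * bring-up above, clear swapper_pg_dir (the kernel is permanently
 * mapped through P1 and needs no TLB entries of its own), point the
 * MMU's TTB at swapper_pg_dir, pre-populate the page tables covering
 * the fixmap region, and hand the zone sizes to the page allocator.
 */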
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();

	sh_mv.mv_mem_init();

	early_reserve_mem();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
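/*
 * Hand the pages holding the initial ramdisk image back to the page
 * allocator once the initrd contents are no longer needed.
 */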
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */