/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kmalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kmalloc(array_size, GFP_KERNEL);
	} else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
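
/*
 * A reader's sketch (an assumption about the lookup helper, which lives
 * in include/linux/mmzone.h rather than in this file): with
 * SPARSEMEM_EXTREME the table above is two-level, so mapping a section
 * number to its mem_section is conceptually:
 *
 *	struct mem_section *__nr_to_section(unsigned long nr)
 *	{
 *		if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 *			return NULL;
 *		return &mem_section[SECTION_NR_TO_ROOT(nr)]
 *				   [nr & SECTION_ROOT_MASK];
 *	}
 *
 * __section_nr() below is the inverse of that mapping.
 */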
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
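
/*
 * A worked example of the early-nid encoding used by memory_present()
 * above (the numbers are illustrative, assuming SECTION_NID_SHIFT == 2):
 * recording a section on node 3 stores
 *
 *	(3 << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT == 0xd
 *
 * in section_mem_map, and sparse_early_nid() recovers 0xd >> 2 == 3.
 * Shifting the node id up keeps the low bits free for the
 * SECTION_MARKED_PRESENT/SECTION_HAS_MEM_MAP flags.
 */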
/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
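
/*
 * Round-trip example for the encode/decode pair above (the numbers are
 * illustrative): if section pnum begins at pfn 0x8000 and its mem_map
 * array lives at address V, sparse_encode_mem_map() stores
 * coded = V - 0x8000, and sparse_decode_mem_map() returns
 * coded + 0x8000 == V. The stored value is biased so that, for any page
 * in the section, "page - coded" is the page's real pfn; this is what
 * lets pfn arithmetic work with only section_mem_map in hand.
 */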
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
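
/*
 * Worked example for usemap_size() (illustrative bit count): if
 * SECTION_BLOCKFLAGS_BITS were 192, then roundup(192, 8) / 8 == 24
 * bytes, and rounding up to sizeof(unsigned long) leaves it at 24 on a
 * 64-bit build - the "24 bytes" figure mentioned in the sparse_init()
 * comment below. One usemap therefore costs a few tens of bytes per
 * section, which is why several of them get packed into a single page.
 */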
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size(), section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before removing section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
	if (usemap)
		return usemap;

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap) {
		check_usemap_section_nr(nid, usemap);
		return usemap;
	}

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
	       "some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * The mem_map is allocated from big pages (2M on 64-bit x86),
	 * while a usemap is much smaller than a page (e.g. 24 bytes).
	 * Allocating 2M (2M-aligned) and 24 bytes alternately would push
	 * each following 2M allocation to the next 2M boundary, leaving
	 * large systems riddled with holes, so try to keep the 2M pages
	 * contiguous by allocating all of the usemaps up front.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}
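
/*
 * A concrete walk-through of sparse_init() (the layout is made up for
 * illustration): on a box with 128MB sections and RAM at 0-2GB plus
 * 4-6GB, memory_present() marks pnums 0-15 and 32-47 present. The
 * first loop above allocates those 32 usemaps back to back; the second
 * loop then grabs one mem_map per present section and calls
 * sparse_init_one_section(). The absent pnums 16-31 never get a
 * mem_map, which is exactly the memory SPARSEMEM avoids describing.
 */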
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state: all of its pages have
		 * been isolated from the page allocator. If the removing
		 * section's memmap is placed on that same section, it must
		 * not be freed here; otherwise the page allocator could
		 * hand it out again just before the memory is physically
		 * removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif
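
/*
 * A minimal usage sketch of the two entry points above (illustrative,
 * not a real caller; the actual drivers of this code live in
 * mm/memory_hotplug.c and work one section at a time):
 *
 *	int ret = sparse_add_one_section(zone, start_pfn, PAGES_PER_SECTION);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;	(memmap and usemap were freed internally)
 *	...
 *	sparse_remove_one_section(zone, __pfn_to_section(start_pfn));
 */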