mm/page_alloc.c: comparison of the file at commit b66484cd74706fa8681d051840fe4b18a3da40ff with the file at commit 38addce8b600ca335dc86fa3d48c890f1c6fa1f4. The listing alternates between blocks drawn from each version; the number at the start of each line is that line's position in the corresponding version of the file.
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie

--- 77 unchanged lines hidden (view full) ---

86 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
87 * defined in <linux/topology.h>.
88 */
89DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
90EXPORT_PER_CPU_SYMBOL(_numa_mem_);
91int _node_numa_mem_[MAX_NUMNODES];
92#endif
93
94#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
95volatile u64 latent_entropy;
96EXPORT_SYMBOL(latent_entropy);
97#endif
98
94/*
95 * Array of node states.
96 */
97nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
98 [N_POSSIBLE] = NODE_MASK_ALL,
99 [N_ONLINE] = { { [0] = 1UL } },
100#ifndef CONFIG_NUMA
101 [N_NORMAL_MEMORY] = { { [0] = 1UL } },

--- 500 unchanged lines hidden (view full) ---

602early_param("debug_pagealloc", early_debug_pagealloc);
603
604static bool need_debug_guardpage(void)
605{
606 /* If we don't use debug_pagealloc, we don't need guard page */
607 if (!debug_pagealloc_enabled())
608 return false;
609
99/*
100 * Array of node states.
101 */
102nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
103 [N_POSSIBLE] = NODE_MASK_ALL,
104 [N_ONLINE] = { { [0] = 1UL } },
105#ifndef CONFIG_NUMA
106 [N_NORMAL_MEMORY] = { { [0] = 1UL } },

--- 500 unchanged lines hidden (view full) ---

607early_param("debug_pagealloc", early_debug_pagealloc);
608
609static bool need_debug_guardpage(void)
610{
611 /* If we don't use debug_pagealloc, we don't need guard page */
612 if (!debug_pagealloc_enabled())
613 return false;
614
610 if (!debug_guardpage_minorder())
611 return false;
612
613 return true;
614}
615
616static void init_debug_guardpage(void)
617{
618 if (!debug_pagealloc_enabled())
619 return;
620
615 return true;
616}
617
618static void init_debug_guardpage(void)
619{
620 if (!debug_pagealloc_enabled())
621 return;
622
621 if (!debug_guardpage_minorder())
622 return;
623
624 _debug_guardpage_enabled = true;
625}
626
627struct page_ext_operations debug_guardpage_ops = {
628 .need = need_debug_guardpage,
629 .init = init_debug_guardpage,
630};
631

--- 4 unchanged lines hidden (view full) ---

636 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
637 pr_err("Bad debug_guardpage_minorder value\n");
638 return 0;
639 }
640 _debug_guardpage_minorder = res;
641 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
642 return 0;
643}
623 _debug_guardpage_enabled = true;
624}
625
626struct page_ext_operations debug_guardpage_ops = {
627 .need = need_debug_guardpage,
628 .init = init_debug_guardpage,
629};
630

--- 4 unchanged lines hidden (view full) ---

635 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
636 pr_err("Bad debug_guardpage_minorder value\n");
637 return 0;
638 }
639 _debug_guardpage_minorder = res;
640 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
641 return 0;
642}
643__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
644
645static inline void set_page_guard(struct zone *zone, struct page *page,
646 unsigned int order, int migratetype)
647{
648 struct page_ext *page_ext;
649
650 if (!debug_guardpage_enabled())
651 return;
652
653 page_ext = lookup_page_ext(page);
654 if (unlikely(!page_ext))
655 return;
656
657 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
658
659 INIT_LIST_HEAD(&page->lru);
660 set_page_private(page, order);
661 /* Guard pages are not available for any usage */
662 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
644early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
645
646static inline bool set_page_guard(struct zone *zone, struct page *page,
647 unsigned int order, int migratetype)
648{
649 struct page_ext *page_ext;
650
651 if (!debug_guardpage_enabled())
652 return false;
653
654 if (order >= debug_guardpage_minorder())
655 return false;
656
657 page_ext = lookup_page_ext(page);
658 if (unlikely(!page_ext))
659 return false;
660
661 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
662
663 INIT_LIST_HEAD(&page->lru);
664 set_page_private(page, order);
665 /* Guard pages are not available for any usage */
666 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
667
668 return true;
669}
670
671static inline void clear_page_guard(struct zone *zone, struct page *page,
672 unsigned int order, int migratetype)
673{
674 struct page_ext *page_ext;
675
676 if (!debug_guardpage_enabled())

--- 5 unchanged lines hidden (view full) ---

682
683 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
684
685 set_page_private(page, 0);
686 if (!is_migrate_isolate(migratetype))
687 __mod_zone_freepage_state(zone, (1 << order), migratetype);
688}
689#else
663}
664
665static inline void clear_page_guard(struct zone *zone, struct page *page,
666 unsigned int order, int migratetype)
667{
668 struct page_ext *page_ext;
669
670 if (!debug_guardpage_enabled())

--- 5 unchanged lines hidden (view full) ---

676
677 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
678
679 set_page_private(page, 0);
680 if (!is_migrate_isolate(migratetype))
681 __mod_zone_freepage_state(zone, (1 << order), migratetype);
682}
683#else
690struct page_ext_operations debug_guardpage_ops;
691static inline bool set_page_guard(struct zone *zone, struct page *page,
692 unsigned int order, int migratetype) { return false; }
684struct page_ext_operations debug_guardpage_ops = { NULL, };
685static inline void set_page_guard(struct zone *zone, struct page *page,
686 unsigned int order, int migratetype) {}
693static inline void clear_page_guard(struct zone *zone, struct page *page,
694 unsigned int order, int migratetype) {}
695#endif
696
697static inline void set_page_order(struct page *page, unsigned int order)
698{
699 set_page_private(page, order);
700 __SetPageBuddy(page);

--- 698 unchanged lines hidden (view full) ---

1399 unsigned long pfn, int nr_pages)
1400{
1401 int i;
1402
1403 if (!page)
1404 return;
1405
1406 /* Free a large naturally-aligned chunk if possible */
687static inline void clear_page_guard(struct zone *zone, struct page *page,
688 unsigned int order, int migratetype) {}
689#endif
690
691static inline void set_page_order(struct page *page, unsigned int order)
692{
693 set_page_private(page, order);
694 __SetPageBuddy(page);

--- 698 unchanged lines hidden (view full) ---

1393 unsigned long pfn, int nr_pages)
1394{
1395 int i;
1396
1397 if (!page)
1398 return;
1399
1400 /* Free a large naturally-aligned chunk if possible */
1401 if (nr_pages == MAX_ORDER_NR_PAGES &&
1402 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
1403 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1404 __free_pages_boot_core(page, MAX_ORDER-1);
1405 return;
1406 }
1407
1408 for (i = 0; i < nr_pages; i++, page++)
1409 __free_pages_boot_core(page, 0);
1407 if (nr_pages == pageblock_nr_pages &&
1408 (pfn & (pageblock_nr_pages - 1)) == 0) {
1409 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1410 __free_pages_boot_core(page, pageblock_order);
1411 return;
1412 }
1413
1414 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1415 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1416 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1417 __free_pages_boot_core(page, 0);
1418 }
1419}
1420
1421/* Completion tracking for deferred_init_memmap() threads */
1422static atomic_t pgdat_init_n_undone __initdata;
1423static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1424
1425static inline void __init pgdat_init_report_one_done(void)
1426{

--- 51 unchanged lines hidden (view full) ---

1478 pfn = zone->zone_start_pfn;
1479
1480 for (; pfn < end_pfn; pfn++) {
1481 if (!pfn_valid_within(pfn))
1482 goto free_range;
1483
1484 /*
1485 * Ensure pfn_valid is checked every
1410}
1411
1412/* Completion tracking for deferred_init_memmap() threads */
1413static atomic_t pgdat_init_n_undone __initdata;
1414static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1415
1416static inline void __init pgdat_init_report_one_done(void)
1417{

--- 51 unchanged lines hidden (view full) ---

1469 pfn = zone->zone_start_pfn;
1470
1471 for (; pfn < end_pfn; pfn++) {
1472 if (!pfn_valid_within(pfn))
1473 goto free_range;
1474
1475 /*
1476 * Ensure pfn_valid is checked every
1477 * MAX_ORDER_NR_PAGES for memory holes
1478 */
1479 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1486 * pageblock_nr_pages for memory holes
1487 */
1488 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1489 if (!pfn_valid(pfn)) {
1490 page = NULL;
1491 goto free_range;
1492 }
1493 }
1494
1495 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1496 page = NULL;
1497 goto free_range;
1498 }
1499
1500 /* Minimise pfn page lookups and scheduler checks */
1480 if (!pfn_valid(pfn)) {
1481 page = NULL;
1482 goto free_range;
1483 }
1484 }
1485
1486 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1487 page = NULL;
1488 goto free_range;
1489 }
1490
1491 /* Minimise pfn page lookups and scheduler checks */
1492 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1501 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1502 page++;
1503 } else {
1504 nr_pages += nr_to_free;
1505 deferred_free_range(free_base_page,
1506 free_base_pfn, nr_to_free);
1507 free_base_page = NULL;
1508 free_base_pfn = nr_to_free = 0;
1509

--- 19 unchanged lines hidden (view full) ---

1529free_range:
1530 /* Free the current block of pages to allocator */
1531 nr_pages += nr_to_free;
1532 deferred_free_range(free_base_page, free_base_pfn,
1533 nr_to_free);
1534 free_base_page = NULL;
1535 free_base_pfn = nr_to_free = 0;
1536 }
1493 page++;
1494 } else {
1495 nr_pages += nr_to_free;
1496 deferred_free_range(free_base_page,
1497 free_base_pfn, nr_to_free);
1498 free_base_page = NULL;
1499 free_base_pfn = nr_to_free = 0;
1500

--- 19 unchanged lines hidden (view full) ---

1520free_range:
1521 /* Free the current block of pages to allocator */
1522 nr_pages += nr_to_free;
1523 deferred_free_range(free_base_page, free_base_pfn,
1524 nr_to_free);
1525 free_base_page = NULL;
1526 free_base_pfn = nr_to_free = 0;
1527 }
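/*
 * [Editorial illustration, not part of either version of the file.]
 * A minimal standalone sketch of the alignment test the deferred
 * memory-init code above applies to a pfn: a pfn starts a block of
 * "block_pages" pages (pageblock_nr_pages in b66484cd,
 * MAX_ORDER_NR_PAGES in 38addce8) exactly when its low bits are zero.
 * The function name and the example values are invented for the sketch.
 */
static inline int pfn_is_block_aligned(unsigned long pfn,
				       unsigned long block_pages)
{
	/* block_pages is assumed to be a power of two */
	return (pfn & (block_pages - 1)) == 0;
}
/* e.g. with block_pages == 512: pfn 1024 -> 1, pfn 1025 -> 0 */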
1537 /* Free the last block of pages to allocator */
1538 nr_pages += nr_to_free;
1539 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1540
1541 first_init_pfn = max(end_pfn, first_init_pfn);
1542 }
1543
1544 /* Sanity check that the next zone really is unpopulated */
1545 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1546
1547 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,

--- 80 unchanged lines hidden (view full) ---

1628 unsigned long size = 1 << high;
1629
1630 while (high > low) {
1631 area--;
1632 high--;
1633 size >>= 1;
1634 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1635
1528
1529 first_init_pfn = max(end_pfn, first_init_pfn);
1530 }
1531
1532 /* Sanity check that the next zone really is unpopulated */
1533 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1534
1535 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,

--- 80 unchanged lines hidden (view full) ---

1616 unsigned long size = 1 << high;
1617
1618 while (high > low) {
1619 area--;
1620 high--;
1621 size >>= 1;
1622 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1623
1636 /*
1637 * Mark as guard pages (or page), that will allow to
1638 * merge back to allocator when buddy will be freed.
1639 * Corresponding page table entries will not be touched,
1640 * pages will stay not present in virtual address space
1641 */
1642 if (set_page_guard(zone, &page[size], high, migratetype))
1624 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
1625 debug_guardpage_enabled() &&
1626 high < debug_guardpage_minorder()) {
1627 /*
1628 * Mark as guard pages (or page), that will allow to
1629 * merge back to allocator when buddy will be freed.
1630 * Corresponding page table entries will not be touched,
1631 * pages will stay not present in virtual address space
1632 */
1633 set_page_guard(zone, &page[size], high, migratetype);
1634 continue;
1635 }
1643 continue;
1644
1645 list_add(&page[size].lru, &area->free_list[migratetype]);
1646 area->nr_free++;
1647 set_page_order(&page[size], high);
1648 }
1649}
1650
1651static void check_new_page_bad(struct page *page)
1652{

--- 845 unchanged lines hidden (view full) ---

2498 int mt;
2499
2500 BUG_ON(!PageBuddy(page));
2501
2502 zone = page_zone(page);
2503 mt = get_pageblock_migratetype(page);
2504
2505 if (!is_migrate_isolate(mt)) {
1636 list_add(&page[size].lru, &area->free_list[migratetype]);
1637 area->nr_free++;
1638 set_page_order(&page[size], high);
1639 }
1640}
1641
1642static void check_new_page_bad(struct page *page)
1643{

--- 845 unchanged lines hidden (view full) ---

2489 int mt;
2490
2491 BUG_ON(!PageBuddy(page));
2492
2493 zone = page_zone(page);
2494 mt = get_pageblock_migratetype(page);
2495
2496 if (!is_migrate_isolate(mt)) {
2497 /* Obey watermarks as if the page was being allocated */
2498 watermark = low_wmark_pages(zone) + (1 << order);
2499 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2506 /*
2507 * Obey watermarks as if the page was being allocated. We can
2508 * emulate a high-order watermark check with a raised order-0
2509 * watermark, because we already know our high-order page
2510 * exists.
2511 */
2512 watermark = min_wmark_pages(zone) + (1UL << order);
2513 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2514 return 0;
2515
2516 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2517 }
2518
2519 /* Remove page from free list */
2520 list_del(&page->lru);
2521 zone->free_area[order].nr_free--;

--- 452 unchanged lines hidden (view full) ---

2974#endif
2975 return ret;
2976}
2977
2978static DEFINE_RATELIMIT_STATE(nopage_rs,
2979 DEFAULT_RATELIMIT_INTERVAL,
2980 DEFAULT_RATELIMIT_BURST);
2981
2500 return 0;
2501
2502 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2503 }
2504
2505 /* Remove page from free list */
2506 list_del(&page->lru);
2507 zone->free_area[order].nr_free--;

--- 452 unchanged lines hidden (view full) ---

2960#endif
2961 return ret;
2962}
2963
2964static DEFINE_RATELIMIT_STATE(nopage_rs,
2965 DEFAULT_RATELIMIT_INTERVAL,
2966 DEFAULT_RATELIMIT_BURST);
2967
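/*
 * [Editorial illustration, not part of either version of the file.]
 * A minimal sketch of the watermark check used by __isolate_free_page()
 * above.  The b66484cd version raises the order-0 minimum watermark by
 * 2^order pages and checks that; the zone accessors are replaced by
 * plain parameters here and the helper name is invented, so this is a
 * sketch of the arithmetic only, not of zone_watermark_ok() itself.
 */
#include <stdbool.h>

static bool isolation_would_obey_watermark(unsigned long free_pages,
					   unsigned long min_wmark,
					   unsigned int order)
{
	/* pretend the 2^order pages were being allocated right now */
	unsigned long watermark = min_wmark + (1UL << order);

	return free_pages >= watermark;
}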
2968void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
2969{
2970 unsigned int filter = SHOW_MEM_FILTER_NODES;
2982void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
2983{
2984 unsigned int filter = SHOW_MEM_FILTER_NODES;
2985 struct va_format vaf;
2986 va_list args;
2987
2988 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2989 debug_guardpage_minorder() > 0)
2990 return;
2991
2992 /*
2993 * This documents exceptions given to allocations in certain
2994 * contexts that are allowed to allocate outside current's set
2995 * of allowed nodes.
2996 */
2997 if (!(gfp_mask & __GFP_NOMEMALLOC))
2998 if (test_thread_flag(TIF_MEMDIE) ||
2999 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3000 filter &= ~SHOW_MEM_FILTER_NODES;
3001 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3002 filter &= ~SHOW_MEM_FILTER_NODES;
3003
2971
2972 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2973 debug_guardpage_minorder() > 0)
2974 return;
2975
2976 /*
2977 * This documents exceptions given to allocations in certain
2978 * contexts that are allowed to allocate outside current's set
2979 * of allowed nodes.
2980 */
2981 if (!(gfp_mask & __GFP_NOMEMALLOC))
2982 if (test_thread_flag(TIF_MEMDIE) ||
2983 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2984 filter &= ~SHOW_MEM_FILTER_NODES;
2985 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
2986 filter &= ~SHOW_MEM_FILTER_NODES;
2987
2988 if (fmt) {
2989 struct va_format vaf;
2990 va_list args;
2991
2992 va_start(args, fmt);
2993
2994 vaf.fmt = fmt;
2995 vaf.va = &args;
2996
2997 pr_warn("%pV", &vaf);
2998
2999 va_end(args);
3000 }
3001
3002 pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
3003 current->comm, order, gfp_mask, &gfp_mask);
3004 pr_warn("%s: ", current->comm);
3005
3006 va_start(args, fmt);
3007 vaf.fmt = fmt;
3008 vaf.va = &args;
3009 pr_cont("%pV", &vaf);
3010 va_end(args);
3011
3012 pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
3013
3014 dump_stack();
3015 if (!should_suppress_show_mem())
3016 show_mem(filter);
3017}
3018
3019static inline struct page *
3020__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3021 const struct alloc_context *ac, unsigned long *did_some_progress)

--- 125 unchanged lines hidden (view full) ---

3147 */
3148 count_vm_event(COMPACTFAIL);
3149
3150 cond_resched();
3151
3152 return NULL;
3153}
3154
3004 dump_stack();
3005 if (!should_suppress_show_mem())
3006 show_mem(filter);
3007}
3008
3009static inline struct page *
3010__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3011 const struct alloc_context *ac, unsigned long *did_some_progress)

--- 125 unchanged lines hidden (view full) ---

3137 */
3138 count_vm_event(COMPACTFAIL);
3139
3140 cond_resched();
3141
3142 return NULL;
3143}
3144
3155static inline bool
3156should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3157 enum compact_result compact_result,
3158 enum compact_priority *compact_priority,
3159 int *compaction_retries)
3160{
3161 int max_retries = MAX_COMPACT_RETRIES;
3162 int min_priority;
3163
3164 if (!order)
3165 return false;
3166
3167 if (compaction_made_progress(compact_result))
3168 (*compaction_retries)++;
3169
3170 /*
3171 * compaction considers all the zone as desperately out of memory
3172 * so it doesn't really make much sense to retry except when the
3173 * failure could be caused by insufficient priority
3174 */
3175 if (compaction_failed(compact_result))
3176 goto check_priority;
3177
3178 /*
3179 * make sure the compaction wasn't deferred or didn't bail out early
3180 * due to locks contention before we declare that we should give up.
3181 * But do not retry if the given zonelist is not suitable for
3182 * compaction.
3183 */
3184 if (compaction_withdrawn(compact_result))
3185 return compaction_zonelist_suitable(ac, order, alloc_flags);
3186
3187 /*
3188 * !costly requests are much more important than __GFP_REPEAT
3189 * costly ones because they are de facto nofail and invoke OOM
3190 * killer to move on while costly can fail and users are ready
3191 * to cope with that. 1/4 retries is rather arbitrary but we
3192 * would need much more detailed feedback from compaction to
3193 * make a better decision.
3194 */
3195 if (order > PAGE_ALLOC_COSTLY_ORDER)
3196 max_retries /= 4;
3197 if (*compaction_retries <= max_retries)
3198 return true;
3199
3200 /*
3201 * Make sure there are attempts at the highest priority if we exhausted
3202 * all retries or failed at the lower priorities.
3203 */
3204check_priority:
3205 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3206 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3207 if (*compact_priority > min_priority) {
3208 (*compact_priority)--;
3209 *compaction_retries = 0;
3210 return true;
3211 }
3212 return false;
3213}
3214#else
3215static inline struct page *
3216__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3217 unsigned int alloc_flags, const struct alloc_context *ac,
3218 enum compact_priority prio, enum compact_result *compact_result)
3219{
3220 *compact_result = COMPACT_SKIPPED;
3221 return NULL;
3222}
3223
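/*
 * [Editorial illustration, not part of either version of the file.]
 * A minimal sketch of the retry budget picked by the b66484cd version
 * of should_compact_retry() above: costly orders (larger than
 * PAGE_ALLOC_COSTLY_ORDER) only get a quarter of the retries.  The
 * EX_* constants restate the values the kernel uses around this time
 * (16 and 3) so the sketch stays self-contained.
 */
#define EX_MAX_COMPACT_RETRIES		16
#define EX_PAGE_ALLOC_COSTLY_ORDER	3

static int compact_retry_budget(unsigned int order)
{
	int max_retries = EX_MAX_COMPACT_RETRIES;

	if (order > EX_PAGE_ALLOC_COSTLY_ORDER)
		max_retries /= 4;	/* costly: 4 attempts instead of 16 */

	return max_retries;
}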
3145#else
3146static inline struct page *
3147__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3148 unsigned int alloc_flags, const struct alloc_context *ac,
3149 enum compact_priority prio, enum compact_result *compact_result)
3150{
3151 *compact_result = COMPACT_SKIPPED;
3152 return NULL;
3153}
3154
3155#endif /* CONFIG_COMPACTION */
3156
3157static inline bool
3158should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3159 enum compact_result compact_result,
3160 enum compact_priority *compact_priority,
3161 int compaction_retries)
3224static inline bool
3225should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3226 enum compact_result compact_result,
3227 enum compact_priority *compact_priority,
3228 int *compaction_retries)
3229{
3230 struct zone *zone;
3231 struct zoneref *z;
3232
3233 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3234 return false;
3235
3236 /*

--- 5 unchanged lines hidden (view full) ---

3242 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3243 ac->nodemask) {
3244 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3245 ac_classzone_idx(ac), alloc_flags))
3246 return true;
3247 }
3248 return false;
3249}
3162{
3163 struct zone *zone;
3164 struct zoneref *z;
3165
3166 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3167 return false;
3168
3169 /*

--- 5 unchanged lines hidden (view full) ---

3175 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3176 ac->nodemask) {
3177 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3178 ac_classzone_idx(ac), alloc_flags))
3179 return true;
3180 }
3181 return false;
3182}
3250#endif /* CONFIG_COMPACTION */
3251
3252/* Perform direct synchronous page reclaim */
3253static int
3254__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3255 const struct alloc_context *ac)
3256{
3257 struct reclaim_state reclaim_state;
3258 int progress;

--- 134 unchanged lines hidden (view full) ---

3393 * applicable zone list (with a backoff mechanism which is a function of
3394 * no_progress_loops).
3395 *
3396 * Returns true if a retry is viable or false to enter the oom path.
3397 */
3398static inline bool
3399should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3400 struct alloc_context *ac, int alloc_flags,
3183
3184/* Perform direct synchronous page reclaim */
3185static int
3186__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3187 const struct alloc_context *ac)
3188{
3189 struct reclaim_state reclaim_state;
3190 int progress;

--- 134 unchanged lines hidden (view full) ---

3325 * applicable zone list (with a backoff mechanism which is a function of
3326 * no_progress_loops).
3327 *
3328 * Returns true if a retry is viable or false to enter the oom path.
3329 */
3330static inline bool
3331should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3332 struct alloc_context *ac, int alloc_flags,
3333 bool did_some_progress, int no_progress_loops)
3334{
3335 struct zone *zone;
3336 struct zoneref *z;
3337
3338 /*
3401 bool did_some_progress, int *no_progress_loops)
3402{
3403 struct zone *zone;
3404 struct zoneref *z;
3405
3406 /*
3407 * Costly allocations might have made a progress but this doesn't mean
3408 * their order will become available due to high fragmentation so
3409 * always increment the no progress counter for them
3410 */
3411 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3412 *no_progress_loops = 0;
3413 else
3414 (*no_progress_loops)++;
3415
3416 /*
3417 * Make sure we converge to OOM if we cannot make any progress
3418 * several times in the row.
3419 */
3339 * Make sure we converge to OOM if we cannot make any progress
3340 * several times in the row.
3341 */
3342 if (no_progress_loops > MAX_RECLAIM_RETRIES)
3420 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
3421 return false;
3422
3423 /*
3424 * Keep reclaiming pages while there is a chance this will lead
3425 * somewhere. If none of the target zones can satisfy our allocation
3426 * request even if all reclaimable pages are considered then we are
3427 * screwed and have to go OOM.
3428 */
3429 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3430 ac->nodemask) {
3431 unsigned long available;
3432 unsigned long reclaimable;
3433
3434 available = reclaimable = zone_reclaimable_pages(zone);
3343 return false;
3344
3345 /*
3346 * Keep reclaiming pages while there is a chance this will lead
3347 * somewhere. If none of the target zones can satisfy our allocation
3348 * request even if all reclaimable pages are considered then we are
3349 * screwed and have to go OOM.
3350 */
3351 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3352 ac->nodemask) {
3353 unsigned long available;
3354 unsigned long reclaimable;
3355
3356 available = reclaimable = zone_reclaimable_pages(zone);
3357 available -= DIV_ROUND_UP(no_progress_loops * available,
3435 available -= DIV_ROUND_UP((*no_progress_loops) * available,
3436 MAX_RECLAIM_RETRIES);
3437 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3438
3439 /*
3440 * Would the allocation succeed if we reclaimed the whole
3441 * available?
3442 */
3443 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),

--- 44 unchanged lines hidden (view full) ---

3488 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3489 struct page *page = NULL;
3490 unsigned int alloc_flags;
3491 unsigned long did_some_progress;
3492 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
3493 enum compact_result compact_result;
3494 int compaction_retries = 0;
3495 int no_progress_loops = 0;
3358 MAX_RECLAIM_RETRIES);
3359 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3360
3361 /*
3362 * Would the allocation succeed if we reclaimed the whole
3363 * available?
3364 */
3365 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),

--- 44 unchanged lines hidden (view full) ---

3410 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3411 struct page *page = NULL;
3412 unsigned int alloc_flags;
3413 unsigned long did_some_progress;
3414 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
3415 enum compact_result compact_result;
3416 int compaction_retries = 0;
3417 int no_progress_loops = 0;
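/*
 * [Editorial illustration, not part of either version of the file.]
 * A minimal sketch of the backoff arithmetic in should_reclaim_retry()
 * above: each reclaim round that made no progress discounts another
 * 1/MAX_RECLAIM_RETRIES of the reclaimable estimate, so after
 * MAX_RECLAIM_RETRIES fruitless rounds only the free pages still count
 * as "available".  EX_MAX_RECLAIM_RETRIES restates the kernel's value
 * (16) so the sketch stays self-contained.
 */
#define EX_MAX_RECLAIM_RETRIES	16
#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long backed_off_available(unsigned long reclaimable,
					  unsigned long free_pages,
					  int no_progress_loops)
{
	unsigned long available = reclaimable;

	available -= EX_DIV_ROUND_UP(no_progress_loops * available,
				     EX_MAX_RECLAIM_RETRIES);
	available += free_pages;

	return available;	/* e.g. 8 loops discount half of "reclaimable" */
}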
3496 unsigned long alloc_start = jiffies;
3497 unsigned int stall_timeout = 10 * HZ;
3498
3499 /*
3500 * In the slowpath, we sanity check order to avoid ever trying to
3501 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3502 * be using allocators in order of preference for an area that is
3503 * too large.
3504 */
3505 if (order >= MAX_ORDER) {

--- 128 unchanged lines hidden (view full) ---

3634 goto got_pg;
3635
3636 /* Try direct compaction and then allocating */
3637 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3638 compact_priority, &compact_result);
3639 if (page)
3640 goto got_pg;
3641
3418
3419 /*
3420 * In the slowpath, we sanity check order to avoid ever trying to
3421 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3422 * be using allocators in order of preference for an area that is
3423 * too large.
3424 */
3425 if (order >= MAX_ORDER) {

--- 128 unchanged lines hidden (view full) ---

3554 goto got_pg;
3555
3556 /* Try direct compaction and then allocating */
3557 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3558 compact_priority, &compact_result);
3559 if (page)
3560 goto got_pg;
3561
3562 if (order && compaction_made_progress(compact_result))
3563 compaction_retries++;
3564
3642 /* Do not loop if specifically requested */
3643 if (gfp_mask & __GFP_NORETRY)
3644 goto nopage;
3645
3646 /*
3647 * Do not retry costly high order allocations unless they are
3648 * __GFP_REPEAT
3649 */
3650 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
3651 goto nopage;
3652
3565 /* Do not loop if specifically requested */
3566 if (gfp_mask & __GFP_NORETRY)
3567 goto nopage;
3568
3569 /*
3570 * Do not retry costly high order allocations unless they are
3571 * __GFP_REPEAT
3572 */
3573 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
3574 goto nopage;
3575
3653 /* Make sure we know about allocations which stall for too long */
3654 if (time_after(jiffies, alloc_start + stall_timeout)) {
3655 warn_alloc(gfp_mask,
3656 "page alloction stalls for %ums, order:%u\n",
3657 jiffies_to_msecs(jiffies-alloc_start), order);
3658 stall_timeout += 10 * HZ;
3659 }
3576 /*
3577 * Costly allocations might have made a progress but this doesn't mean
3578 * their order will become available due to high fragmentation so
3579 * always increment the no progress counter for them
3580 */
3581 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3582 no_progress_loops = 0;
3583 else
3584 no_progress_loops++;
3585
3586 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3587 did_some_progress > 0, no_progress_loops))
3660
3661 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3662 did_some_progress > 0, &no_progress_loops))
3663 goto retry;
3664
3665 /*
3666 * It doesn't make any sense to retry for the compaction if the order-0
3667 * reclaim is not able to make any progress because the current
3668 * implementation of the compaction depends on the sufficient amount
3669 * of free memory (see __compaction_suitable)
3670 */
3671 if (did_some_progress > 0 &&
3672 should_compact_retry(ac, order, alloc_flags,
3673 compact_result, &compact_priority,
3588 goto retry;
3589
3590 /*
3591 * It doesn't make any sense to retry for the compaction if the order-0
3592 * reclaim is not able to make any progress because the current
3593 * implementation of the compaction depends on the sufficient amount
3594 * of free memory (see __compaction_suitable)
3595 */
3596 if (did_some_progress > 0 &&
3597 should_compact_retry(ac, order, alloc_flags,
3598 compact_result, &compact_priority,
3599 compaction_retries))
3674 &compaction_retries))
3675 goto retry;
3676
3677 /* Reclaim has failed us, start killing things */
3678 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3679 if (page)
3680 goto got_pg;
3681
3682 /* Retry as long as the OOM killer is making progress */
3683 if (did_some_progress) {
3684 no_progress_loops = 0;
3685 goto retry;
3686 }
3687
3688nopage:
3600 goto retry;
3601
3602 /* Reclaim has failed us, start killing things */
3603 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3604 if (page)
3605 goto got_pg;
3606
3607 /* Retry as long as the OOM killer is making progress */
3608 if (did_some_progress) {
3609 no_progress_loops = 0;
3610 goto retry;
3611 }
3612
3613nopage:
3614 warn_alloc_failed(gfp_mask, order, NULL);
3689 warn_alloc(gfp_mask,
3690 "page allocation failure: order:%u", order);
3691got_pg:
3692 return page;
3693}
3694
3695/*
3696 * This is the 'heart' of the zoned buddy allocator.
3697 */
3698struct page *

--- 932 unchanged lines hidden (view full) ---

4631 * This results in maximum locality--normal zone overflows into local
4632 * DMA zone, if any--but risks exhausting DMA zone.
4633 */
4634static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4635{
4636 int j;
4637 struct zonelist *zonelist;
4638
3615got_pg:
3616 return page;
3617}
3618
3619/*
3620 * This is the 'heart' of the zoned buddy allocator.
3621 */
3622struct page *

--- 932 unchanged lines hidden (view full) ---

4555 * This results in maximum locality--normal zone overflows into local
4556 * DMA zone, if any--but risks exhausting DMA zone.
4557 */
4558static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
4559{
4560 int j;
4561 struct zonelist *zonelist;
4562
4563 zonelist = &pgdat->node_zonelists[0];
4639 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4640 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4641 ;
4642 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4643 zonelist->_zonerefs[j].zone = NULL;
4644 zonelist->_zonerefs[j].zone_idx = 0;
4645}
4646
4647/*
4648 * Build gfp_thisnode zonelists
4649 */
4650static void build_thisnode_zonelists(pg_data_t *pgdat)
4651{
4652 int j;
4653 struct zonelist *zonelist;
4654
4564 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
4565 ;
4566 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
4567 zonelist->_zonerefs[j].zone = NULL;
4568 zonelist->_zonerefs[j].zone_idx = 0;
4569}
4570
4571/*
4572 * Build gfp_thisnode zonelists
4573 */
4574static void build_thisnode_zonelists(pg_data_t *pgdat)
4575{
4576 int j;
4577 struct zonelist *zonelist;
4578
4579 zonelist = &pgdat->node_zonelists[1];
4655 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
4656 j = build_zonelists_node(pgdat, zonelist, 0);
4657 zonelist->_zonerefs[j].zone = NULL;
4658 zonelist->_zonerefs[j].zone_idx = 0;
4659}
4660
4661/*
4662 * Build zonelists ordered by zone and nodes within zones.
4663 * This results in conserving DMA zone[s] until all Normal memory is

--- 4 unchanged lines hidden (view full) ---

4668
4669static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4670{
4671 int pos, j, node;
4672 int zone_type; /* needs to be signed */
4673 struct zone *z;
4674 struct zonelist *zonelist;
4675
4580 j = build_zonelists_node(pgdat, zonelist, 0);
4581 zonelist->_zonerefs[j].zone = NULL;
4582 zonelist->_zonerefs[j].zone_idx = 0;
4583}
4584
4585/*
4586 * Build zonelists ordered by zone and nodes within zones.
4587 * This results in conserving DMA zone[s] until all Normal memory is

--- 4 unchanged lines hidden (view full) ---

4592
4593static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4594{
4595 int pos, j, node;
4596 int zone_type; /* needs to be signed */
4597 struct zone *z;
4598 struct zonelist *zonelist;
4599
4600 zonelist = &pgdat->node_zonelists[0];
4676 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4677 pos = 0;
4678 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4679 for (j = 0; j < nr_nodes; j++) {
4680 node = node_order[j];
4681 z = &NODE_DATA(node)->node_zones[zone_type];
4682 if (managed_zone(z)) {
4683 zoneref_set_zone(z,
4684 &zonelist->_zonerefs[pos++]);

--- 118 unchanged lines hidden (view full) ---

4803static void build_zonelists(pg_data_t *pgdat)
4804{
4805 int node, local_node;
4806 enum zone_type j;
4807 struct zonelist *zonelist;
4808
4809 local_node = pgdat->node_id;
4810
4601 pos = 0;
4602 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4603 for (j = 0; j < nr_nodes; j++) {
4604 node = node_order[j];
4605 z = &NODE_DATA(node)->node_zones[zone_type];
4606 if (managed_zone(z)) {
4607 zoneref_set_zone(z,
4608 &zonelist->_zonerefs[pos++]);

--- 118 unchanged lines hidden (view full) ---

4727static void build_zonelists(pg_data_t *pgdat)
4728{
4729 int node, local_node;
4730 enum zone_type j;
4731 struct zonelist *zonelist;
4732
4733 local_node = pgdat->node_id;
4734
4735 zonelist = &pgdat->node_zonelists[0];
4811 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
4812 j = build_zonelists_node(pgdat, zonelist, 0);
4813
4814 /*
4815 * Now we build the zonelist so that it contains the zones
4816 * of all the other nodes.
4817 * We don't want to pressure a particular node, so when
4818 * building the zones for node N, we make sure that the
4819 * zones coming right after the local ones are those from

--- 256 unchanged lines hidden (view full) ---

5076 continue;
5077 if (!early_pfn_in_nid(pfn, nid))
5078 continue;
5079 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5080 break;
5081
5082#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5083 /*
4736 j = build_zonelists_node(pgdat, zonelist, 0);
4737
4738 /*
4739 * Now we build the zonelist so that it contains the zones
4740 * of all the other nodes.
4741 * We don't want to pressure a particular node, so when
4742 * building the zones for node N, we make sure that the
4743 * zones coming right after the local ones are those from

--- 256 unchanged lines hidden (view full) ---

5000 continue;
5001 if (!early_pfn_in_nid(pfn, nid))
5002 continue;
5003 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5004 break;
5005
5006#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5007 /*
5008 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
5009 * from zone_movable_pfn[nid] to end of each node should be
5010 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
5011 */
5012 if (!mirrored_kernelcore && zone_movable_pfn[nid])
5013 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
5014 continue;
5015
5016 /*
5084 * Check given memblock attribute by firmware which can affect
5085 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
5086 * mirrored, it's an overlapped memmap init. skip it.
5087 */
5088 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5089 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5090 for_each_memblock(memory, tmp)
5091 if (pfn < memblock_region_memory_end_pfn(tmp))

--- 426 unchanged lines hidden (view full) ---

5518 /* Only adjust if ZONE_MOVABLE is on this node */
5519 if (zone_movable_pfn[nid]) {
5520 /* Size ZONE_MOVABLE */
5521 if (zone_type == ZONE_MOVABLE) {
5522 *zone_start_pfn = zone_movable_pfn[nid];
5523 *zone_end_pfn = min(node_end_pfn,
5524 arch_zone_highest_possible_pfn[movable_zone]);
5525
5017 * Check given memblock attribute by firmware which can affect
5018 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
5019 * mirrored, it's an overlapped memmap init. skip it.
5020 */
5021 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5022 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5023 for_each_memblock(memory, tmp)
5024 if (pfn < memblock_region_memory_end_pfn(tmp))

--- 426 unchanged lines hidden (view full) ---

5451 /* Only adjust if ZONE_MOVABLE is on this node */
5452 if (zone_movable_pfn[nid]) {
5453 /* Size ZONE_MOVABLE */
5454 if (zone_type == ZONE_MOVABLE) {
5455 *zone_start_pfn = zone_movable_pfn[nid];
5456 *zone_end_pfn = min(node_end_pfn,
5457 arch_zone_highest_possible_pfn[movable_zone]);
5458
5526 /* Adjust for ZONE_MOVABLE starting within this range */
5527 } else if (!mirrored_kernelcore &&
5528 *zone_start_pfn < zone_movable_pfn[nid] &&
5529 *zone_end_pfn > zone_movable_pfn[nid]) {
5530 *zone_end_pfn = zone_movable_pfn[nid];
5531
5532 /* Check if this whole range is within ZONE_MOVABLE */
5533 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5534 *zone_start_pfn = *zone_end_pfn;
5535 }
5536}
5537
5538/*
5539 * Return the number of pages a zone spans in a node, including holes

--- 87 unchanged lines hidden (view full) ---

5627 &zone_start_pfn, &zone_end_pfn);
5628 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5629
5630 /*
5631 * ZONE_MOVABLE handling.
5632 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5633 * and vice versa.
5634 */
5459 /* Check if this whole range is within ZONE_MOVABLE */
5460 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5461 *zone_start_pfn = *zone_end_pfn;
5462 }
5463}
5464
5465/*
5466 * Return the number of pages a zone spans in a node, including holes

--- 87 unchanged lines hidden (view full) ---

5554 &zone_start_pfn, &zone_end_pfn);
5555 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5556
5557 /*
5558 * ZONE_MOVABLE handling.
5559 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5560 * and vice versa.
5561 */
5562 if (zone_movable_pfn[nid]) {
5563 if (mirrored_kernelcore) {
5564 unsigned long start_pfn, end_pfn;
5565 struct memblock_region *r;
5566
5567 for_each_memblock(memory, r) {
5568 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5569 zone_start_pfn, zone_end_pfn);
5570 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5571 zone_start_pfn, zone_end_pfn);
5572
5573 if (zone_type == ZONE_MOVABLE &&
5574 memblock_is_mirror(r))
5575 nr_absent += end_pfn - start_pfn;
5576
5577 if (zone_type == ZONE_NORMAL &&
5578 !memblock_is_mirror(r))
5579 nr_absent += end_pfn - start_pfn;
5580 }
5581 } else {
5582 if (zone_type == ZONE_NORMAL)
5583 nr_absent += node_end_pfn - zone_movable_pfn[nid];
5635 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5636 unsigned long start_pfn, end_pfn;
5637 struct memblock_region *r;
5638
5639 for_each_memblock(memory, r) {
5640 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5641 zone_start_pfn, zone_end_pfn);
5642 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5643 zone_start_pfn, zone_end_pfn);
5644
5645 if (zone_type == ZONE_MOVABLE &&
5646 memblock_is_mirror(r))
5647 nr_absent += end_pfn - start_pfn;
5648
5649 if (zone_type == ZONE_NORMAL &&
5650 !memblock_is_mirror(r))
5651 nr_absent += end_pfn - start_pfn;
5652 }
5653 }
5654
5655 return nr_absent;
5656}
5657
5658#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5659static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,

--- 1337 unchanged lines hidden (view full) ---

6997 if (!str)
6998 return 0;
6999 hashdist = simple_strtoul(str, &str, 0);
7000 return 1;
7001}
7002__setup("hashdist=", set_hashdist);
7003#endif
7004
5584 }
5585 }
5586
5587 return nr_absent;
5588}
5589
5590#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5591static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,

--- 1337 unchanged lines hidden (view full) ---

6929 if (!str)
6930 return 0;
6931 hashdist = simple_strtoul(str, &str, 0);
6932 return 1;
6933}
6934__setup("hashdist=", set_hashdist);
6935#endif
6936
6937/*
7005#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7006/*
7007 * Returns the number of pages that arch has reserved but
7008 * is not known to alloc_large_system_hash().
7009 */
7010static unsigned long __init arch_reserved_kernel_pages(void)
7011{
7012 return 0;
7013}
7014#endif
7015
7016/*
7017 * allocate a large system hash table from bootmem
7018 * - it is assumed that the hash table must contain an exact power-of-2
7019 * quantity of entries
7020 * - limit is the number of hash buckets, not the total allocation size
7021 */
7022void *__init alloc_large_system_hash(const char *tablename,
7023 unsigned long bucketsize,
7024 unsigned long numentries,

--- 7 unchanged lines hidden (view full) ---

7032 unsigned long long max = high_limit;
7033 unsigned long log2qty, size;
7034 void *table = NULL;
7035
7036 /* allow the kernel cmdline to have a say */
7037 if (!numentries) {
7038 /* round applicable memory size up to nearest megabyte */
7039 numentries = nr_kernel_pages;
6938 * allocate a large system hash table from bootmem
6939 * - it is assumed that the hash table must contain an exact power-of-2
6940 * quantity of entries
6941 * - limit is the number of hash buckets, not the total allocation size
6942 */
6943void *__init alloc_large_system_hash(const char *tablename,
6944 unsigned long bucketsize,
6945 unsigned long numentries,

--- 7 unchanged lines hidden (view full) ---

6953 unsigned long long max = high_limit;
6954 unsigned long log2qty, size;
6955 void *table = NULL;
6956
6957 /* allow the kernel cmdline to have a say */
6958 if (!numentries) {
6959 /* round applicable memory size up to nearest megabyte */
6960 numentries = nr_kernel_pages;
7040 numentries -= arch_reserved_kernel_pages();
7041
7042 /* It isn't necessary when PAGE_SIZE >= 1MB */
7043 if (PAGE_SHIFT < 20)
7044 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7045
7046 /* limit to 1 bucket per 2^scale bytes of low memory */
7047 if (scale > PAGE_SHIFT)
7048 numentries >>= (scale - PAGE_SHIFT);

--- 495 unchanged lines hidden ---
6961
6962 /* It isn't necessary when PAGE_SIZE >= 1MB */
6963 if (PAGE_SHIFT < 20)
6964 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6965
6966 /* limit to 1 bucket per 2^scale bytes of low memory */
6967 if (scale > PAGE_SHIFT)
6968 numentries >>= (scale - PAGE_SHIFT);

--- 495 unchanged lines hidden ---
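/*
 * [Editorial illustration, not part of either version of the file.]
 * A minimal sketch of how alloc_large_system_hash() above shapes
 * "numentries" when nothing was given on the command line: round the
 * page count up to a whole megabyte's worth of pages, then allow one
 * bucket per 2^scale bytes of low memory.  EX_PAGE_SHIFT assumes
 * 4 KiB pages purely for the example; the function name is invented.
 */
#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

static unsigned long shape_numentries(unsigned long nr_kernel_pages,
				      unsigned long scale)
{
	unsigned long numentries = nr_kernel_pages;
	unsigned long pages_per_mb = (1UL << 20) / EX_PAGE_SIZE;

	/* round applicable memory size up to the nearest megabyte */
	if (EX_PAGE_SHIFT < 20)
		numentries = (numentries + pages_per_mb - 1) /
			     pages_per_mb * pages_per_mb;

	/* limit to 1 bucket per 2^scale bytes of low memory */
	if (scale > EX_PAGE_SHIFT)
		numentries >>= (scale - EX_PAGE_SHIFT);

	return numentries;
}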