Lines Matching full:zone

69 struct zone *zone;
76 /* Identify the zone and nodelist */
80 zone = &pgdat->node_zones[zoneid];
81 if (!populated_zone(zone))
87 zone->name);
90 for_each_zone_zonelist(zone, z, zonelist, zoneid)
91 pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
105 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
115 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
122 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
129 "Node/Zone ID: %lu -> %lu\n",
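
The pageflags prints around lines 105-129 report the bit widths and shifts of the Section, Node, Zone, Lastcpupid and Kasantag fields packed into page->flags. Below is a minimal standalone sketch (not kernel code) of that shift arithmetic: fields are packed from the top bit downwards, so each field's shift is the previous shift minus its own width. All widths are invented, not a real configuration.

#include <stdio.h>

int main(void)
{
	unsigned long bits_per_long = 8 * sizeof(unsigned long);

	/* hypothetical widths in bits, for illustration only */
	unsigned long section_width = 0, node_width = 10, zone_width = 3;
	unsigned long lastcpupid_width = 8, kasantag_width = 0;

	unsigned long section_shift    = bits_per_long - section_width;
	unsigned long node_shift       = section_shift - node_width;
	unsigned long zone_shift       = node_shift - zone_width;
	unsigned long lastcpupid_shift = zone_shift - lastcpupid_width;
	unsigned long kasantag_shift   = lastcpupid_shift - kasantag_width;

	printf("Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
	       section_shift, node_shift, zone_shift,
	       lastcpupid_shift, kasantag_shift);
	return 0;
}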
311 * Sum pages in active regions for movable zone.
331 * This finds a zone that can be used for ZONE_MOVABLE pages. The
333 * increasing memory addresses so that the "highest" populated zone is used
352 * Find the PFN the Movable zone begins in each node. Kernel memory
582 unsigned long zone, int nid)
585 set_page_links(page, zone, nid, pfn);
594 if (!is_highmem_idx(zone))
670 * Initialize a reserved page unconditionally, finding its zone first.
680 struct zone *zone = &pgdat->node_zones[zid];
682 if (zone_spans_pfn(zone, pfn))
727 * prev_end_pfn is a static that contains the end of the previous zone
803 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
805 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
809 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
841 * - zone and node links point to zone and node that span the page if the
842 * hole is in the middle of a zone
843 * - zone and node links point to adjacent zone/node if the hole falls on
844 * the zone boundary; the pages in such holes will be prepended to the
845 * zone/node above the hole except for the trailing pages in the last
846 * section that will be appended to the zone/node below.
850 int zone, int node)
856 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
862 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
863 node, zone_names[zone], pgcnt);
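
Lines 841-863 describe how struct pages for holes in memblock.memory still get zone and node links and are counted. A self-contained sketch in the spirit of init_unavailable_range() follows; the init_single_page() stub and all numbers are illustrative only, not the kernel's code.

#include <stdio.h>

static const char *zone_names[] = { "DMA", "DMA32", "Normal", "Movable" };

/* stand-in for __init_single_page(): pretend to set zone/node links for one pfn */
static void init_single_page(unsigned long pfn, int zone, int node)
{
	(void)pfn; (void)zone; (void)node;
}

/* loosely modeled on init_unavailable_range(); not the kernel's implementation */
static void init_hole_range(unsigned long spfn, unsigned long epfn, int zone, int node)
{
	unsigned long pfn, pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		init_single_page(pfn, zone, node);
		pgcnt++;
	}

	if (pgcnt)
		printf("On node %d, zone %s: %lu pages in unavailable ranges\n",
		       node, zone_names[zone], pgcnt);
}

int main(void)
{
	/* hypothetical 512-page hole attributed to node 0, zone Normal */
	init_hole_range(0x1000, 0x1200, 2, 0);
	return 0;
}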
873 * zone stats (e.g., nr_isolate_pageblock) are touched.
875 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
895 if (zone == ZONE_DEVICE) {
911 if (overlap_memmap_init(zone, &pfn))
920 __init_single_page(page, pfn, zone, nid);
923 if (zone == ZONE_DEVICE)
944 static void __init memmap_init_zone_range(struct zone *zone,
949 unsigned long zone_start_pfn = zone->zone_start_pfn;
950 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
951 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
979 struct zone *zone = node->node_zones + j;
981 if (!populated_zone(zone))
984 memmap_init_zone_range(zone, start_pfn, end_pfn,
994 * Append the pages in this hole to the highest zone in the last
1016 * phase for it to be fully associated with a zone.
1111 void __ref memmap_init_zone_device(struct zone *zone,
1117 struct pglist_data *pgdat = zone->zone_pgdat;
1120 unsigned long zone_idx = zone_idx(zone);
1155 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1159 * is distributed. This helper function adjusts the zone ranges
1161 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
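
Lines 1155-1161 explain that the architecture's zone ranges do not include ZONE_MOVABLE, so the ranges are adjusted and ZONE_MOVABLE is carved out of the highest usable zone. A simplified standalone sketch of that clamping; the helper name and all pfn values are made up, not the kernel's adjustment code.

#include <stdio.h>

struct pfn_range { unsigned long start, end; };

/* illustrative helper: stop a lower zone where ZONE_MOVABLE begins */
static struct pfn_range clamp_for_movable(struct pfn_range zone, unsigned long movable_start)
{
	if (zone.end > movable_start)
		zone.end = movable_start > zone.start ? movable_start : zone.start;
	return zone;
}

int main(void)
{
	/* hypothetical ZONE_NORMAL range and kernelcore/movablecore split point */
	struct pfn_range normal = { 0x100000, 0x480000 };
	unsigned long movable_start = 0x300000;

	struct pfn_range adjusted = clamp_for_movable(normal, movable_start);
	struct pfn_range movable  = { movable_start, 0x480000 };

	printf("Normal:  %#lx-%#lx\n", adjusted.start, adjusted.end);
	printf("Movable: %#lx-%#lx\n", movable.start, movable.end);
	return 0;
}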
1223 /* Return the number of page frames in holes in a zone on a node */
1231 /* zone is empty, we don't have any absent pages */
1266 * Return the number of pages a zone spans in a node, including holes
1279 /* Get the start and end of the zone */
1285 /* Check that this node has pages within the zone's required range */
1289 /* Move the zone boundaries inside the node if necessary */
1299 struct zone *z;
1347 struct zone *zone = pgdat->node_zones + i;
1364 zone->zone_start_pfn = zone_start_pfn;
1366 zone->zone_start_pfn = 0;
1367 zone->spanned_pages = spanned;
1368 zone->present_pages = real_size;
1370 zone->present_early_pages = real_size;
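
Lines 1347-1371 record each zone's spanned and present pages. The arithmetic behind those fields, sketched with made-up numbers: present is spanned minus the pages lost to holes.

#include <stdio.h>

int main(void)
{
	/* invented numbers: a zone spanning 0x380000 pfns, 0x20000 of them in holes */
	unsigned long zone_start_pfn = 0x100000;
	unsigned long zone_end_pfn   = 0x480000;
	unsigned long absent_pages   = 0x20000;

	unsigned long spanned = zone_end_pfn - zone_start_pfn;
	unsigned long present = spanned - absent_pages;

	printf("spanned_pages = %#lx, present_pages = %#lx\n", spanned, present);
	return 0;
}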
1424 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1427 atomic_long_set(&zone->managed_pages, remaining_pages);
1428 zone_set_nid(zone, nid);
1429 zone->name = zone_names[idx];
1430 zone->zone_pgdat = NODE_DATA(nid);
1431 spin_lock_init(&zone->lock);
1432 zone_seqlock_init(zone);
1433 zone_pcp_init(zone);
1436 static void __meminit zone_init_free_lists(struct zone *zone)
1440 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1441 zone->free_area[order].nr_free = 0;
1445 INIT_LIST_HEAD(&zone->unaccepted_pages);
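
Lines 1436-1445 show the body of zone_init_free_lists(): every (order, migratetype) free list starts empty and every nr_free counter starts at zero. A self-contained sketch of the implied nested loops; the list type and both bounds are stand-ins, not kernel definitions.

#include <stdio.h>

#define NR_ORDERS	11	/* assumed: orders 0..10 */
#define NR_MIGRATETYPES	6	/* assumed */

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

struct free_area {
	struct list_head free_list[NR_MIGRATETYPES];
	unsigned long nr_free;
};

int main(void)
{
	static struct free_area free_area[NR_ORDERS];

	for (int order = 0; order < NR_ORDERS; order++) {
		for (int t = 0; t < NR_MIGRATETYPES; t++)
			INIT_LIST_HEAD(&free_area[order].free_list[t]);
		free_area[order].nr_free = 0;
	}

	printf("initialised %d orders x %d migrate types of empty free lists\n",
	       NR_ORDERS, NR_MIGRATETYPES);
	return 0;
}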
1449 void __meminit init_currently_empty_zone(struct zone *zone,
1453 struct pglist_data *pgdat = zone->zone_pgdat;
1454 int zone_idx = zone_idx(zone) + 1;
1459 zone->zone_start_pfn = zone_start_pfn;
1462 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
1464 (unsigned long)zone_idx(zone),
1467 zone_init_free_lists(zone);
1468 zone->initialized = 1;
1473 * Calculate the size of the zone->pageblock_flags rounded to an unsigned long
1492 static void __ref setup_usemap(struct zone *zone)
1494 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1495 zone->spanned_pages);
1496 zone->pageblock_flags = NULL;
1498 zone->pageblock_flags =
1500 zone_to_nid(zone));
1501 if (!zone->pageblock_flags)
1502 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1503 usemapsize, zone->name, zone_to_nid(zone));
1507 static inline void setup_usemap(struct zone *zone) {}
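
Lines 1473-1503 size and allocate zone->pageblock_flags. A userspace sketch of the size calculation named at line 1473: NR_PAGEBLOCK_BITS per pageblock, rounded up to whole unsigned longs. All constants are assumptions for the example, not authoritative kernel values.

#include <stdio.h>

#define PAGEBLOCK_ORDER		9UL	/* assumed */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS	4UL	/* assumed */
#define BITS_PER_LONG		(8UL * sizeof(unsigned long))

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

/* illustrative version of the calculation; not the kernel's usemap_size() */
static unsigned long usemap_size_sketch(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	/* account for a zone start that is not pageblock aligned */
	zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
	usemapsize = roundup_ul(zonesize, PAGEBLOCK_NR_PAGES) >> PAGEBLOCK_ORDER;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup_ul(usemapsize, BITS_PER_LONG);

	return usemapsize / 8;	/* bytes */
}

int main(void)
{
	printf("%lu bytes of pageblock flags\n", usemap_size_sketch(0x100000, 0x380000));
	return 0;
}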
1546 * Set up the zone data structures
1588 struct zone *zone = pgdat->node_zones + z;
1590 zone->present_pages = 0;
1591 zone_init_internals(zone, z, nid, 0);
1605 struct zone *zone = pgdat->node_zones + j;
1606 unsigned long size = zone->spanned_pages;
1609 * Initialize zone->managed_pages as 0; it will be reset
1612 zone_init_internals(zone, j, nid, zone->present_pages);
1617 setup_usemap(zone);
1618 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1659 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
1757 struct zone *zone = &pgdat->node_zones[zone_type];
1758 if (populated_zone(zone)) {
1812 * free_area_init - Initialise all pg_data_t and zone data
1813 * @max_zone_pfn: an array of max PFNs for each zone
1817 * zone in each node and their holes is calculated. If the maximum PFNs
1818 * of two adjacent zones match, it is assumed that the higher zone is empty.
1820 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1827 int i, nid, zone;
1830 /* Record where the zone boundaries are */
1841 zone = MAX_NR_ZONES - i - 1;
1843 zone = i;
1845 if (zone == ZONE_MOVABLE)
1848 end_pfn = max(max_zone_pfn[zone], start_pfn);
1849 arch_zone_lowest_possible_pfn[zone] = start_pfn;
1850 arch_zone_highest_possible_pfn[zone] = end_pfn;
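
The loop at lines 1827-1850 turns the max_zone_pfn array into per-zone [lowest, highest) pfn ranges: each zone starts where the previous one ended, ZONE_MOVABLE is skipped, and a zone whose max pfn does not exceed the running start ends up empty, matching the note at lines 1817-1818. A standalone sketch for the ascending-zone case only, with invented pfn values.

#include <stdio.h>

enum { Z_DMA, Z_DMA32, Z_NORMAL, Z_MOVABLE, NR_Z };

int main(void)
{
	const char *names[NR_Z] = { "DMA", "DMA32", "Normal", "Movable" };
	/* invented max pfns; Normal deliberately matches DMA32 to show an empty zone */
	unsigned long max_zone_pfn[NR_Z] = { 0x1000, 0x100000, 0x100000, 0 };
	unsigned long lowest[NR_Z] = { 0 }, highest[NR_Z] = { 0 };
	unsigned long start_pfn = 0, end_pfn;

	for (int i = 0; i < NR_Z; i++) {
		if (i == Z_MOVABLE)	/* handled separately via kernelcore/movablecore */
			continue;
		end_pfn = max_zone_pfn[i] > start_pfn ? max_zone_pfn[i] : start_pfn;
		lowest[i] = start_pfn;
		highest[i] = end_pfn;
		start_pfn = end_pfn;
	}

	for (int i = 0; i < NR_Z; i++)
		printf("%-8s %#010lx - %#010lx%s\n", names[i], lowest[i], highest[i],
		       lowest[i] == highest[i] ? " (empty)" : "");
	return 0;
}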
1859 /* Print out the zone ranges */
1860 pr_info("Zone ranges:\n");
1877 pr_info("Movable zone start for each node\n");
2034 static unsigned long __init deferred_init_pages(struct zone *zone,
2037 int nid = zone_to_nid(zone);
2039 int zid = zone_idx(zone);
2062 struct zone *zone)
2064 int nid = zone_to_nid(zone);
2083 nr_pages += deferred_init_pages(zone, spfn, chunk_end);
2102 struct zone *zone = arg;
2104 deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
2121 struct zone *zone;
2141 * Once we unlock here, the zone cannot be grown anymore, thus if an
2142 * interrupt thread must allocate this early in boot, zone must be
2147 /* Only the highest zone is deferred */
2148 zone = pgdat->node_zones + pgdat->nr_zones - 1;
2149 last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone));
2153 .fn_arg = zone,
2164 /* Sanity check that the next zone really is unpopulated */
2165 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
2175 * If this zone has deferred pages, try to grow it by initializing enough
2181 * Return true when zone was grown, otherwise return false. We return true even
2185 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2188 pg_data_t *pgdat = zone->zone_pgdat;
2193 /* Only the last zone may have deferred pages */
2194 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2200 * If someone grew this zone while we were waiting for spinlock, return
2217 nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
2219 nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
2223 * There were no pages to initialize and free which means the zone's
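
Lines 2185-2223 grow the last (deferred) zone on demand: pfn chunks are initialized until enough pages exist for the pending allocation. A hedged sketch of that chunked loop; the chunk size, page counts and init_chunk() helper are stand-ins, not the kernel's deferred_init_memmap_chunk().

#include <stdio.h>

#define CHUNK_PAGES	2048UL	/* assumed chunk granularity */

/* stand-in: pretend every pfn in the chunk becomes an initialized, freeable page */
static unsigned long init_chunk(unsigned long spfn, unsigned long epfn)
{
	return epfn - spfn;
}

int main(void)
{
	unsigned long first_deferred_pfn = 0x200000, zone_end_pfn = 0x480000;
	unsigned int order = 9;
	unsigned long nr_pages_needed = 2UL << order;	/* assumed headroom for one order-9 request */
	unsigned long nr_pages = 0, spfn = first_deferred_pfn, epfn;

	for (; nr_pages < nr_pages_needed && spfn < zone_end_pfn; spfn = epfn) {
		epfn = spfn + CHUNK_PAGES < zone_end_pfn ? spfn + CHUNK_PAGES : zone_end_pfn;
		nr_pages += init_chunk(spfn, epfn);
	}

	printf("initialized %lu pages, new first_deferred_pfn %#lx\n", nr_pages, spfn);
	return 0;
}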
2266 void set_zone_contiguous(struct zone *zone)
2268 unsigned long block_start_pfn = zone->zone_start_pfn;
2272 for (; block_start_pfn < zone_end_pfn(zone);
2276 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2279 block_end_pfn, zone))
2285 zone->contiguous = true;
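
Lines 2266-2285 walk the zone one pageblock at a time and only mark it contiguous when every block's pfn range checks out. A standalone sketch; block_ok() stands in for the kernel's __pageblock_pfn_to_page() returning non-NULL, and the sizes are assumed.

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* assumed */

/* stand-in for __pageblock_pfn_to_page() returning non-NULL in the kernel */
static bool block_ok(unsigned long spfn, unsigned long epfn)
{
	(void)spfn; (void)epfn;
	return true;	/* pretend every pageblock is fully backed by valid struct pages */
}

int main(void)
{
	unsigned long zone_start_pfn = 0x100000, zone_end_pfn = 0x180000;
	bool contiguous = true;

	for (unsigned long spfn = zone_start_pfn; spfn < zone_end_pfn;
	     spfn += PAGEBLOCK_NR_PAGES) {
		unsigned long epfn = spfn + PAGEBLOCK_NR_PAGES;

		if (epfn > zone_end_pfn)
			epfn = zone_end_pfn;
		if (!block_ok(spfn, epfn)) {
			contiguous = false;
			break;
		}
	}

	printf("zone->contiguous = %d\n", contiguous);
	return 0;
}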
2296 struct zone *zone, *izone = NULL;
2298 for_each_zone(zone) {
2299 if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
2302 if (zone_intersects(zone, start_pfn, nr_pages)) {
2305 izone = zone;
2316 struct zone *zone;
2350 for_each_populated_zone(zone)
2351 set_zone_contiguous(zone);