Lines Matching defs:zone
336 * and each of them corresponds to one zone. For each zone bitmap
340 * struct memory_bitmap contains a pointer to the main list of zone
343 * zone bitmap objects and bitmap block objects.
353 * PFNs that correspond to the start and end of the represented zone.
361 * access to the bits. There is one radix tree for each zone (as returned
390 * populated memory zone.
406 struct mem_zone_bm_rtree *zone;
415 struct linked_page *p_list; /* list of pages used to store zone
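The comment fragments and struct members above outline the data layout: the memory bitmap keeps one radix-tree object per populated zone, each tree's leaves are page-sized bitmap blocks, and a small cursor caches the zone and leaf last touched. A minimal compilable sketch of that shape, with simplified and renamed fields (illustrative only, not the kernel's definitions):

    #include <stdio.h>

    struct rtree_node {                 /* one page worth of tree node or leaf */
        struct rtree_node *next;        /* linkage on the zone's nodes/leaves list */
        unsigned long *data;            /* child pointers (inner) or bit data (leaf) */
    };

    struct mem_zone_bm_rtree_sketch {
        struct mem_zone_bm_rtree_sketch *next;  /* entry on the bitmap's zone list */
        struct rtree_node *nodes;       /* all inner nodes of this zone's tree */
        struct rtree_node *leaves;      /* all leaf (bitmap) pages */
        unsigned long start_pfn;        /* first PFN covered by this zone */
        unsigned long end_pfn;          /* one past the last covered PFN */
        struct rtree_node *rtree;       /* root of the radix tree */
        int levels;                     /* current tree depth */
        unsigned int blocks;            /* number of leaf bitmap blocks */
    };

    struct bm_position_sketch {         /* cursor kept in the memory bitmap */
        struct mem_zone_bm_rtree_sketch *zone;
        struct rtree_node *node;        /* leaf holding the last bit touched */
        unsigned long node_pfn;         /* first PFN represented by that leaf */
        int node_bit;                   /* last bit position returned */
    };

    int main(void)
    {
        printf("zone descriptor: %zu bytes, cursor: %zu bytes\n",
               sizeof(struct mem_zone_bm_rtree_sketch),
               sizeof(struct bm_position_sketch));
        return 0;
    }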
465 * linked list in order. This is guaranteed by the zone->blocks
468 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
475 block_nr = zone->blocks;
485 for (i = zone->levels; i < levels_needed; i++) {
487 &zone->nodes);
491 node->data[0] = (unsigned long)zone->rtree;
492 zone->rtree = node;
493 zone->levels += 1;
497 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
502 node = zone->rtree;
503 dst = &zone->rtree;
504 block_nr = zone->blocks;
505 for (i = zone->levels; i > 0; i--) {
510 &zone->nodes);
522 zone->blocks += 1;
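The add_rtree_block() fragments above first work out how many tree levels the next block index needs and grow the tree (pushing a new root above zone->rtree) before descending to attach the new leaf. A hedged sketch of just the level arithmetic, assuming 4 KiB node pages holding 512 child pointers, so BM_RTREE_LEVEL_SHIFT is 9 (the macro name is taken from that code; the helper itself is illustrative):

    #include <stdio.h>

    /* Assumed: each inner node holds 2^BM_RTREE_LEVEL_SHIFT child slots. */
    #define BM_RTREE_LEVEL_SHIFT    9

    /*
     * How many tree levels are needed so that leaf number block_nr is
     * addressable.  Mirrors the "grow the tree if needed" step above.
     */
    static int levels_needed(unsigned int block_nr)
    {
        int levels = 0;

        while (block_nr) {
            levels++;
            block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }
        return levels;
    }

    int main(void)
    {
        /* block 0 needs no inner level, block 512 forces a second level */
        printf("%d %d %d\n", levels_needed(0), levels_needed(1),
               levels_needed(1 << BM_RTREE_LEVEL_SHIFT));
        return 0;
    }

With this geometry one level addresses 512 leaves, two levels 512*512, and so on, which is why zone->levels grows only logarithmically with zone->blocks.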
528 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
532 * create_zone_bm_rtree - Create a radix tree for one zone.
536 * zone.
544 struct mem_zone_bm_rtree *zone;
549 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
550 if (!zone)
553 INIT_LIST_HEAD(&zone->nodes);
554 INIT_LIST_HEAD(&zone->leaves);
555 zone->start_pfn = start;
556 zone->end_pfn = end;
560 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
561 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
566 return zone;
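create_zone_bm_rtree() above sizes the tree from the PFN span it is given and then adds one leaf block at a time with add_rtree_block(). A hedged sketch of the block-count arithmetic, assuming one 4 KiB bitmap page covers BM_BITS_PER_BLOCK = 32768 PFNs:

    #include <stdio.h>

    /* Assumed: bits in one 4 KiB leaf page (4096 * 8). */
    #define BM_BITS_PER_BLOCK   (4096UL * 8)

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long start = 0x100000, end = 0x140000;  /* example 1 GiB span */
        unsigned long pages = end - start;
        /* one leaf is added per block via add_rtree_block() */
        unsigned long nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        printf("%lu PFNs -> %lu bitmap blocks\n", pages, nr_blocks);
        return 0;
    }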
576 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
581 list_for_each_entry(node, &zone->nodes, list)
584 list_for_each_entry(node, &zone->leaves, list)
590 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
592 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
630 struct zone *zone;
634 for_each_populated_zone(zone) {
638 zone_start = zone->zone_start_pfn;
639 zone_end = zone_end_pfn(zone);
660 /* Merge this zone's range of PFNs with the existing one */
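The loop over for_each_populated_zone() above collects each zone's [zone_start, zone_end) PFN range and, per the comment, merges it with ranges already gathered, so overlapping or adjacent zones end up in a single extent before the bitmaps are built. A small illustrative merge over a sorted array (the real code keeps a linked list of extents; this is not that code, and it ignores the case of a new range bridging two existing extents):

    #include <stdio.h>

    struct extent { unsigned long start, end; };  /* [start, end) PFN range */

    static int merge_range(struct extent *ext, int n,
                           unsigned long start, unsigned long end)
    {
        for (int i = 0; i < n; i++) {
            if (start <= ext[i].end && end >= ext[i].start) {
                /* Overlaps or touches: widen the existing extent */
                if (start < ext[i].start)
                    ext[i].start = start;
                if (end > ext[i].end)
                    ext[i].end = end;
                return n;
            }
        }
        ext[n].start = start;   /* disjoint: append a new extent */
        ext[n].end = end;
        return n + 1;
    }

    int main(void)
    {
        struct extent ext[4];
        int n = 0;

        n = merge_range(ext, n, 0x1000, 0x8000);  /* first zone */
        n = merge_range(ext, n, 0x8000, 0x9000);  /* adjacent zone merges */
        printf("%d extent(s): [%#lx, %#lx)\n", n, ext[0].start, ext[0].end);
        return 0;
    }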
700 struct mem_zone_bm_rtree *zone;
702 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
704 if (!zone) {
708 list_add_tail(&zone->list, &bm->zones);
729 struct mem_zone_bm_rtree *zone;
731 list_for_each_entry(zone, &bm->zones, list)
732 free_zone_bm_rtree(zone, clear_nosave_free);
743 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
751 struct mem_zone_bm_rtree *curr, *zone;
755 zone = bm->cur.zone;
757 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
760 zone = NULL;
762 /* Find the right zone */
765 zone = curr;
770 if (!zone)
775 * We have found the zone. Now walk the radix tree to find the leaf node
780 * If the zone we wish to scan is the current zone and the
785 if (zone == bm->cur.zone &&
786 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
789 node = zone->rtree;
790 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
792 for (i = zone->levels; i > 0; i--) {
803 bm->cur.zone = zone;
805 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
810 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
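The lookup above converts a PFN into a leaf page plus a bit offset: the zone-relative PFN shifted right by BM_BLOCK_SHIFT gives the block number, each tree level peels off BM_RTREE_LEVEL_SHIFT bits of that number to pick a child slot, and the low BM_BLOCK_MASK bits select the bit inside the leaf. An arithmetic-only sketch of those index computations (no tree is walked; the constants assume 4 KiB pages and 64-bit pointers):

    #include <stdio.h>

    #define BM_BLOCK_SHIFT        15   /* assumed: 32768 PFNs per leaf page */
    #define BM_BLOCK_MASK         ((1UL << BM_BLOCK_SHIFT) - 1)
    #define BM_RTREE_LEVEL_SHIFT  9    /* assumed: 512 slots per inner node */
    #define BM_RTREE_LEVEL_MASK   ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

    int main(void)
    {
        unsigned long start_pfn = 0x100000, pfn = 0x123456;
        unsigned long block_nr = (pfn - start_pfn) >> BM_BLOCK_SHIFT;
        unsigned long bit_nr = (pfn - start_pfn) & BM_BLOCK_MASK;
        int levels = 2;               /* pretend the zone's tree has 2 levels */

        /* Per-level child index, highest level first, as in the tree walk */
        for (int i = levels; i > 0; i--) {
            unsigned long index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
            index &= BM_RTREE_LEVEL_MASK;
            printf("level %d -> slot %lu\n", i, index);
        }
        printf("leaf block %lu, bit %lu\n", block_nr, bit_nr);
        return 0;
    }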
887 * zone's radix tree or the first node in the radix tree of the
888 * next zone.
894 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
903 /* No more nodes, go to next zone */
904 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
905 bm->cur.zone = list_entry(bm->cur.zone->list.next,
907 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
935 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
940 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
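The iterator above scans the current leaf for the next set bit, moves on to the next leaf of the zone or to the first leaf of the next zone once a leaf is exhausted, and then rebuilds the PFN from three parts, as the last fragment shows. A trivial sketch of that reassembly with made-up values:

    #include <stdio.h>

    #define BM_BLOCK_SHIFT  15   /* assumed: 32768 PFNs per leaf bitmap page */

    int main(void)
    {
        unsigned long zone_start_pfn = 0x100000;
        unsigned long node_pfn = 3UL << BM_BLOCK_SHIFT;  /* cursor on 4th leaf */
        unsigned long bit = 42;                          /* next set bit found */

        /* pfn = cur.zone->start_pfn + cur.node_pfn + bit */
        unsigned long pfn = zone_start_pfn + node_pfn + bit;

        printf("next marked PFN: %#lx\n", pfn);
        return 0;
    }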
963 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
967 list_for_each_entry(node, &zone->nodes, list)
970 list_for_each_entry(node, &zone->leaves, list)
976 struct mem_zone_bm_rtree *zone;
979 list_for_each_entry(zone, &bm->zones, list)
980 recycle_zone_bm_rtree(zone);
1219 * @zone: Memory zone to carry out the computation for.
1222 * image data structures for @zone (usually, the returned value is greater than
1225 unsigned int snapshot_additional_pages(struct zone *zone)
1229 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
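The seed value above is one leaf per BM_BITS_PER_BLOCK span of the zone; the inner nodes on top of those leaves shrink by the per-node fan-out at each level, and the total is doubled because two bitmaps are kept per zone. A hedged sketch of that sum (constants assume 4 KiB pages; the real routine also budgets a little extra for per-node bookkeeping, which fits the "usually greater than" wording at line 1222):

    #include <stdio.h>

    #define BM_BITS_PER_BLOCK     (4096UL * 8)  /* PFNs covered per leaf page */
    #define BM_ENTRIES_PER_LEVEL  512UL         /* child slots per inner node */
    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

    /* Pages of bookkeeping needed for one zone's pair of bitmaps (sketch). */
    static unsigned long additional_pages(unsigned long spanned_pages)
    {
        unsigned long rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
        while (nodes > 1) {               /* add each inner level of the tree */
            nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
            rtree += nodes;
        }
        return 2 * rtree;                 /* two bitmaps per zone */
    }

    int main(void)
    {
        /* e.g. a zone spanning 4 GiB of 4 KiB pages */
        printf("%lu extra pages\n", additional_pages(1UL << 20));
        return 0;
    }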
1245 static void mark_free_pages(struct zone *zone)
1252 if (zone_is_empty(zone))
1255 spin_lock_irqsave(&zone->lock, flags);
1257 max_zone_pfn = zone_end_pfn(zone);
1258 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1267 if (page_zone(page) != zone)
1276 &zone->free_area[order].free_list[t], buddy_list) {
1289 spin_unlock_irqrestore(&zone->lock, flags);
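mark_free_pages() above walks the zone under zone->lock, first by PFN and then through every free_area[order].free_list[t], so pages currently sitting in the buddy allocator can be flagged and later skipped by the saveable-page tests. The detail worth spelling out is that a block found on free_area[order] spans 2^order consecutive PFNs, all of which get marked, not just the head page. A tiny sketch of that span:

    #include <stdio.h>

    int main(void)
    {
        unsigned long head_pfn = 0x4200;  /* head page of a free buddy block */
        unsigned int order = 3;           /* found on free_area[3] */

        /* every page of the block is marked free, not only the head */
        for (unsigned long i = 0; i < (1UL << order); i++)
            printf("mark PFN %#lx as free\n", head_pfn + i);
        return 0;
    }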
1300 struct zone *zone;
1303 for_each_populated_zone(zone)
1304 if (is_highmem(zone))
1305 cnt += zone_page_state(zone, NR_FREE_PAGES);
1318 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1326 if (!page || page_zone(page) != zone)
1348 struct zone *zone;
1351 for_each_populated_zone(zone) {
1354 if (!is_highmem(zone))
1357 mark_free_pages(zone);
1358 max_zone_pfn = zone_end_pfn(zone);
1359 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1360 if (saveable_highmem_page(zone, pfn))
1377 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1385 if (!page || page_zone(page) != zone)
1411 struct zone *zone;
1415 for_each_populated_zone(zone) {
1416 if (is_highmem(zone))
1419 mark_free_pages(zone);
1420 max_zone_pfn = zone_end_pfn(zone);
1421 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1422 if (saveable_page(zone, pfn))
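saveable_page() and saveable_highmem_page() above are the per-PFN filters used by the counting loops, which call mark_free_pages() first. A skeleton of the shape of that test with stubbed placeholder predicates; the helper names below are illustrative, not kernel APIs, and the real checks consult the page flags set by mark_free_pages(), the nosave regions, and reserved pages among others:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder predicates standing in for the real per-page checks. */
    static bool pfn_has_valid_page(unsigned long pfn) { return pfn != 0; }
    static bool page_marked_free(unsigned long pfn)   { (void)pfn; return false; }
    static bool page_is_forbidden(unsigned long pfn)  { (void)pfn; return false; }

    /* Shape of the per-PFN test used by the counting and copy loops (sketch). */
    static bool saveable(unsigned long pfn)
    {
        if (!pfn_has_valid_page(pfn))   /* hole, or page not in this zone */
            return false;
        if (page_marked_free(pfn))      /* sitting in the buddy free lists */
            return false;
        if (page_is_forbidden(pfn))     /* nosave region, reserved, ... */
            return false;
        return true;
    }

    int main(void)
    {
        unsigned long count = 0;

        for (unsigned long pfn = 0x1000; pfn < 0x2000; pfn++)
            if (saveable(pfn))
                count++;
        printf("%lu saveable pages\n", count);
        return 0;
    }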
1469 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1471 return is_highmem(zone) ?
1472 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1506 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1526 struct zone *zone;
1529 for_each_populated_zone(zone) {
1532 mark_free_pages(zone);
1533 max_zone_pfn = zone_end_pfn(zone);
1534 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1535 if (page_is_saveable(zone, pfn))
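The loop above is the marking half of the copy step: every populated zone is scanned once and each saveable PFN is recorded in a bitmap, so the subsequent copying can iterate set bits instead of rescanning all of memory. A self-contained sketch of that mark-then-iterate pattern over a flat 64-bit bitmap (the real code uses the per-zone radix-tree bitmaps built earlier):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_PFNS 64

    static uint64_t bitmap;               /* flat stand-in for the memory bitmap */

    static bool saveable(unsigned long pfn)  /* placeholder predicate */
    {
        return pfn % 3 != 0;
    }

    int main(void)
    {
        /* Pass 1: mark saveable PFNs, zone by zone in the real code */
        for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++)
            if (saveable(pfn))
                bitmap |= 1ULL << pfn;

        /* Pass 2: copy only the marked pages by walking the set bits */
        unsigned long copied = 0;
        for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++)
            if (bitmap & (1ULL << pfn))
                copied++;                 /* the real code copies the page here */

        printf("copied %lu of %d pages\n", copied, NR_PFNS);
        return 0;
    }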
1828 struct zone *zone;
1871 for_each_populated_zone(zone) {
1872 size += snapshot_additional_pages(zone);
1873 if (is_highmem(zone))
1874 highmem += zone_page_state(zone, NR_FREE_PAGES);
1876 count += zone_page_state(zone, NR_FREE_PAGES);
1903 * To avoid excessive pressure on the normal zone, leave room in it to
2013 struct zone *zone;
2016 for_each_populated_zone(zone)
2017 if (!is_highmem(zone))
2018 free += zone_page_state(zone, NR_FREE_PAGES);