/linux/drivers/md/dm-vdo/
logical-zone.c (all matches in initialize_zone()):
     55: struct logical_zone *zone = &zones->zones[zone_number];
     58: result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
     63: zone->next = &zones->zones[zone_number + 1];
     65: vdo_initialize_completion(&zone->completion, vdo,
     67: zone->zones = zones;
     68: zone->zone_number = zone_number;
     69: zone->thread_id = vdo->thread_config.logical_threads[zone_number];
     70: zone->block_map_zone = &vdo->block_map->zones[zone_number];
     71: INIT_LIST_HEAD(&zone->write_vios);
     72: vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
    [all …]
|
block-map.c:
    struct members:
       60: struct block_map_zone *zone;
       98: struct block_map_zone *zone;
    in initialize_info():
      200:     info->vio->completion.callback_thread_id = cache->zone->thread_id;
    in assert_on_cache_thread():
      251:     VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
      253:                         function_name, cache->zone->thread_id, thread_id);
    in assert_io_allowed():
      259:     VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
    in check_for_drain_complete():
      637: static void check_for_drain_complete(struct block_map_zone *zone)
      639:     if (vdo_is_state_draining(&zone->state) &&
      640:         (zone->active_lookups == 0) &&
      641:         !vdo_waitq_has_waiters(&zone->flush_waiters) &&
    [all …]
|
physical-zone.c (all matches in initialize_zone()):
    330: struct physical_zone *zone = &zones->zones[zone_number];
    332: result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
    336: result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
    338: vdo_int_map_free(zone->pbn_operations);
    342: zone->zone_number = zone_number;
    343: zone->thread_id = vdo->thread_config.physical_threads[zone_number];
    344: zone->allocator = &vdo->depot->allocators[zone_number];
    345: zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
    346: result = vdo_make_default_thread(vdo, zone->thread_id);
    348: free_pbn_lock_pool(vdo_forget(zone->lock_pool));
    [all …]
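Line 345 above links each physical zone's next pointer to the following zone modulo the zone count, forming a ring that round-robin traversals can walk from any starting zone. A minimal standalone sketch of that linking pattern (hypothetical struct, not VDO's actual types):

    #include <stdio.h>

    #define ZONE_COUNT 4

    struct zone {
        unsigned int number;
        struct zone *next;
    };

    int main(void)
    {
        struct zone zones[ZONE_COUNT];

        /* Link each zone to the next, wrapping at the end, as in the
         * snippet's (zone_number + 1) % zone_count. */
        for (unsigned int i = 0; i < ZONE_COUNT; i++) {
            zones[i].number = i;
            zones[i].next = &zones[(i + 1) % ZONE_COUNT];
        }

        /* Walk the ring exactly once, starting anywhere. */
        struct zone *z = &zones[2];
        for (int i = 0; i < ZONE_COUNT; i++, z = z->next)
            printf("zone %u -> %u\n", z->number, z->next->number);
        return 0;
    }

Because the ring has no head or tail, every zone is a valid starting point, which suits per-zone threads that each begin searches at their own zone.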
|
dedupe.c:
    in assert_in_hash_zone():
      321: static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name)
      323:     VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
    in change_timer_state():
      332: static inline bool change_timer_state(struct hash_zone *zone, int old, int new)
      334:     return (atomic_cmpxchg(&zone->timer_state, old, new) == old);
    in return_hash_lock_to_pool():
      342: static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
      348:     list_add_tail(&lock->pool_node, &zone->lock_pool);
    in unlock_duplicate_pbn():
      694:     vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
    in release_context():
      718:     struct hash_zone *zone = context->zone;
      720:     WRITE_ONCE(zone->active, zone->active - 1);
      721:     list_move(&context->list_entry, &zone->available);
    [all …]
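change_timer_state() above is a common lock-free state-machine idiom: a compare-and-swap that succeeds only if the state is still the expected old value, so exactly one racing caller wins each transition. The kernel's atomic_cmpxchg() returns the previously observed value, and comparing it to old converts that into a success flag. A minimal userspace sketch of the same idiom using C11 atomics (state names are illustrative, not VDO's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum timer_state { TIMER_IDLE, TIMER_RUNNING, TIMER_FIRED };

    /* Atomically move the state machine from `old` to `new`; returns true
     * only for the one caller that actually performed the transition. */
    static bool change_timer_state(atomic_int *state, int old, int new)
    {
        /* compare_exchange writes `new` only if *state == old; C11 returns
         * the success bool directly, where atomic_cmpxchg() returns the
         * observed old value instead. */
        return atomic_compare_exchange_strong(state, &old, new);
    }

    int main(void)
    {
        atomic_int state = TIMER_IDLE;

        if (change_timer_state(&state, TIMER_IDLE, TIMER_RUNNING))
            puts("this caller owns the timer start");
        if (!change_timer_state(&state, TIMER_IDLE, TIMER_RUNNING))
            puts("second transition refused: state is no longer IDLE");
        return 0;
    }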
|
/linux/fs/pstore/
zone.c:
    in buffer_datalen():
      160: static inline int buffer_datalen(struct pstore_zone *zone)
      162:     return atomic_read(&zone->buffer->datalen);
    in buffer_start():
      165: static inline int buffer_start(struct pstore_zone *zone)
      167:     return atomic_read(&zone->buffer->start);
    in psz_zone_read_buffer():
      175: static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
      178:     if (!buf || !zone || !zone->buffer)
      180:     if (off > zone->buffer_size)
      182:     len = min_t(size_t, len, zone->buffer_size - off);
      183:     memcpy(buf, zone->buffer->data + off, len);
    in psz_zone_read_oldbuf():
      187: static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
    [all …]
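psz_zone_read_buffer() is a classic bounds-clamped copy: validate the inputs, reject an out-of-range offset, clamp the requested length to what remains past the offset, then memcpy. A self-contained sketch of the same pattern (hypothetical names, not pstore's API; the kernel uses min_t() where this clamps by hand):

    #include <stddef.h>
    #include <string.h>
    #include <sys/types.h>

    struct zone_buf {
        char   *data;
        size_t  size;
    };

    /* Copy up to `len` bytes starting at `off`, clamped to the buffer end.
     * Returns the number of bytes copied, or -1 on invalid arguments. */
    static ssize_t zone_read(const struct zone_buf *zb, char *out,
                             size_t len, size_t off)
    {
        if (!zb || !zb->data || !out)
            return -1;
        if (off > zb->size)          /* offset past the end: nothing to read */
            return -1;
        if (len > zb->size - off)    /* clamp to the bytes that remain */
            len = zb->size - off;
        memcpy(out, zb->data + off, len);
        return (ssize_t)len;
    }

Checking `off > size` before computing `size - off` keeps the subtraction from wrapping, which is why the kernel snippet performs the tests in that order too.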
|
/linux/mm/
page_alloc.c:
     81:  * shuffle the whole zone).
     93: /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
    308: /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
    320: static bool cond_accept_memory(struct zone *zone, unsigned int order,
    346: _deferred_grow_zone(struct zone *zone, unsigned int order)
    348:     return deferred_grow_zone(zone, order);
    356: static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
    other definitions matched: _deferred_grow_zone() 347, 357; page_outside_zone_boundaries() 605;
    bad_range() 630, 640; task_capc() 760, 802; account_freepages() 815; __add_to_free_list() 833;
    move_to_free_list() 859; __del_page_from_free_list() 883; del_page_from_free_list() 905;
    __free_one_page() 981; free_pcppages_bulk() 1481; split_large_buddy() 1535;
    add_page_to_zone_llist() 1558; free_one_page() 1567; __free_pages_ok() 1607;
    __pageblock_pfn_to_page() 1686; expand() 1727; page_del_and_expand() 1755;
    __rmqueue_smallest() 1914; __rmqueue_cma_fallback() 1953, 1959; __move_freepages_block() 1967;
    prep_move_freepages_block() 2000; move_freepages_block() 2050;
    __move_freepages_block_isolate() 2124; pageblock_isolate_and_move_free_pages() 2176;
    pageblock_unisolate_and_move_free_pages() 2181; boost_watermark() 2188;
    try_to_claim_block() 2307; __rmqueue_claim() 2382; __rmqueue_steal() 2437; __rmqueue() 2473;
    rmqueue_bulk() 2542; decay_pcp_high() 2583; drain_zone_pages() 2623; drain_pages_zone() 2641;
    drain_pages() 2666; drain_local_pages() 2676; __drain_all_pages() 2696; drain_all_pages() 2769;
    nr_pcp_high() 2799; free_frozen_page_commit() 2854; __free_frozen_pages() 2964;
    free_unref_folios() 3050; __isolate_free_page() 3135; __putback_isolated_page() 3185;
    rmqueue_buddy() 3222; nr_pcp_alloc() 3267; __rmqueue_pcplist() 3319; rmqueue_pcplist() 3351;
    rmqueue() 3394; reserve_highatomic_pageblock() 3428; unreserve_highatomic_pageblock() 3485;
    zone_allows_reclaim() 3718, 3724; alloc_flags_nofragment() 3739; get_page_from_freelist() 3795;
    __alloc_pages_direct_compact() 4187; should_compact_retry() 4287; wake_all_kswapds() 4457;
    should_reclaim_retry() 4587; alloc_pages_bulk_noprof() 5061; nr_free_zone_pages() 5500;
    zoneref_set_zone() 5532; build_zonerefs_node() 5545; zone_batchsize() 5914;
    zone_highsize() 5962; __zone_set_pageset_high_and_batch() 6058;
    zone_set_pageset_high_and_batch() 6074; setup_zone_pageset() 6106; zone_pcp_update() 6131;
    zone_pcp_update_cacheinfo() 6138; setup_pcp_cacheinfo() 6163; setup_per_cpu_pageset() 6176;
    zone_pcp_init() 6201; page_alloc_cpu_dead() 6277; page_alloc_cpu_online() 6308;
    calculate_totalreserve_pages() 6341; setup_per_zone_lowmem_reserve() 6401;
    __setup_per_zone_wmarks() 6429; setup_per_zone_wmarks() 6498; setup_min_unmapped_ratio() 6611;
    setup_min_slab_ratio() 6639; percpu_pagelist_high_fraction_sysctl_handler() 6697;
    zone_spans_last_pfn() 7147; alloc_contig_pages_noprof() 7183; zone_pcp_disable() 7250;
    zone_pcp_enable() 7257; zone_pcp_reset() 7264; __offline_isolated_pages() 7298;
    add_to_free_list() 7360; break_down_buddy_pages() 7372; take_page_off_buddy() 7403;
    put_page_back_buddy() 7439; has_managed_zone() 7460; __accept_page() 7496; accept_page() 7512;
    try_to_accept_memory_one() 7525; cond_accept_memory() 7544, 7608; __free_unaccepted() 7585;
    [all …]
show_mem.c:
    in show_node():
       26: static inline void show_node(struct zone *zone)
       29:     printk("Node %d ", zone_to_nid(zone));
    in si_mem_available():
       38:     struct zone *zone;
       40:     for_each_zone(zone)
       41:         wmark_low += low_wmark_pages(zone);
    in si_meminfo_node():
       98:     struct zone *zone = &pgdat->node_zones[zone_type];
       99:     managed_pages += zone_managed_pages(zone);
    other definitions matched: show_free_areas() 184; __show_mem() 405;
    [all …]
compaction.c:
    in defer_compaction():
      126: static void defer_compaction(struct zone *zone, int order)
      128:     zone->compact_considered = 0;
      129:     zone->compact_defer_shift++;
      131:     if (order < zone->compact_order_failed)
      132:         zone->compact_order_failed = order;
      134:     if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
      135:         zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
      137:     trace_mm_compaction_defer_compaction(zone, order);
    in compaction_deferred():
      141: static bool compaction_deferred(struct zone *zone, int order)
      143:     unsigned long defer_limit = 1UL << zone->compact_defer_shift;
    [all …]
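defer_compaction()/compaction_deferred() together implement capped exponential backoff: each compaction failure doubles the number of subsequent attempts that will be skipped (1 << compact_defer_shift), up to a maximum shift. A self-contained sketch of that policy (simplified, not the kernel's exact per-zone bookkeeping; the cap value mirrors COMPACT_MAX_DEFER_SHIFT but is an assumption here):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEFER_SHIFT 6   /* cap on doubling, like COMPACT_MAX_DEFER_SHIFT */

    struct backoff {
        unsigned long considered; /* attempts seen since the last failure */
        unsigned int  shift;      /* failures so far, capped */
    };

    /* Record a failure: reset the attempt counter and double the limit. */
    static void defer(struct backoff *b)
    {
        b->considered = 0;
        if (b->shift < MAX_DEFER_SHIFT)
            b->shift++;
    }

    /* Should this attempt be skipped?  True until 1 << shift attempts pass. */
    static bool deferred(struct backoff *b)
    {
        unsigned long limit = 1UL << b->shift;

        if (++b->considered >= limit) {
            b->considered = limit; /* hold at the limit between checks */
            return false;          /* allow a fresh attempt */
        }
        return true;
    }

    int main(void)
    {
        struct backoff b = { 0, 0 };

        defer(&b);
        defer(&b);                 /* two failures -> limit = 1 << 2 = 4 */
        for (int i = 0; i < 5; i++)
            printf("attempt %d: %s\n", i, deferred(&b) ? "skip" : "run");
        return 0;
    }

The cap matters: without it, a persistently failing zone would back off essentially forever instead of retrying once memory conditions change.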
|
vmstat.c:
    in zero_zone_numa_counters():
       39: /* zero numa counters within a zone */
       40: static void zero_zone_numa_counters(struct zone *zone)
       45:     atomic_long_set(&zone->vm_numa_event[item], 0);
       47:     per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
    in zero_zones_numa_counters():
       56:     struct zone *zone;
       58:     for_each_populated_zone(zone)
       59:         zero_zone_numa_counters(zone);
      161:  * Manage combined zone base …
    other definitions matched: fold_vm_zone_numa_events() 172; fold_vm_numa_events() 192;
    calculate_pressure_threshold() 201; calculate_normal_threshold() 225;
    refresh_zone_stat_thresholds() 278; set_pgdat_percpu_threshold() 321, 323;
    __mod_zone_page_state() 345; __inc_zone_state() 435; __dec_zone_state() 491;
    mod_zone_state() 560; mod_zone_page_state() 599, 695; inc_zone_page_state() 709;
    refresh_cpu_vm_stats() 817; cpu_vm_stats_fold() 907; drain_zonestat() 963;
    fill_contig_page_info() 1078; extfrag_for_order() 1145; fragmentation_index() 1159;
    walk_zones_in_node() 1542, 1544; frag_show_print() 1563; pagetypeinfo_showfree_print() 1588;
    pagetypeinfo_showblockcount_print() 1645; is_zone_first_populated() 1752;
    zoneinfo_show_print() 1767; need_update() 2070; unusable_show_print() 2331;
    extfrag_show_print() 2381;
    [all …]
mm_init.c:
    in mminit_verify_zonelist():
       69:     struct zone *zone;
       76:     /* Identify the zone and nodelist */
       80:     zone = &pgdat->node_zones[zoneid];
       81:     if (!populated_zone(zone))
       87:         zone->name);
       90:     for_each_zone_zonelist(zone, z, zonelist, zoneid)
       91:         pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
      311:  * Sum pages in active regions for movable zone
    other definitions matched: __init_single_page() 582; __init_page_from_nid() 680;
    overlap_memmap_init() 805; init_unavailable_range() 850; memmap_init_range() 875;
    memmap_init_zone_range() 944; memmap_init() 979; memmap_init_zone_device() 1111;
    calculate_node_totalpages() 1347; zone_init_internals() 1424; zone_init_free_lists() 1436;
    init_currently_empty_zone() 1449; setup_usemap() 1492, 1507;
    free_area_init_core_hotplug() 1588; free_area_init_core() 1605; check_for_memory() 1757;
    free_area_init() 1827; deferred_init_pages() 2034; deferred_init_memmap_chunk() 2062;
    deferred_init_memmap_job() 2102; deferred_init_memmap() 2121; deferred_grow_zone() 2185;
    set_zone_contiguous() 2266; pfn_range_intersects_zones() 2296; page_alloc_init_late() 2316;
    [all …]
memory_hotplug.c:
      159:  * specifying a zone (MMOP_ONLINE)
      161:  * "contig-zones": keep zone contiguous
    in find_smallest_section_pfn():
      434: static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
      445:     if (zone != page_zone(pfn_to_page(start_pfn)))
    in find_biggest_section_pfn():
      455: static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
      470:     if (zone != page_zone(pfn_to_page(pfn)))
      479: static void shrink_zone_span(struct zone *zone, unsigned …
    other definitions matched: update_pgdat_span() 522; remove_pfn_range_from_zone() 547;
    resize_zone_range() 703; move_pfn_range_to_zone() 748; auto_movable_stats_account_zone() 795;
    auto_movable_can_online_movable() 849; default_kernel_zone_for_pfn() 905;
    adjust_present_page_count() 1072; mhp_init_memmap_on_memory() 1091; online_pages() 1145;
    offline_pages() 1895;
    [all …]
page_reporting.c:
    in page_reporting_cycle():
      146: page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
      150:     struct free_area *area = &zone->free_area[order];
      164:     spin_lock_irq(&zone->lock);
      216:      * zone lock.
      222:     spin_unlock_irq(&zone->lock);
      233:     /* reacquire zone lock and resume processing */
      234:     spin_lock_irq(&zone->lock);
      254:     spin_unlock_irq(&zone->lock);
    in page_reporting_process_zone():
      261:     struct scatterlist *sgl, struct zone *zone)
    other definitions matched: page_reporting_process() 314;
    [all …]
/linux/tools/power/cpupower/lib/
powercap.c:
    in sysfs_powercap_get64_val():
      124: static int sysfs_powercap_get64_val(struct powercap_zone *zone,
      132:     strcat(file, zone->sys_name);
    per-attribute getters:
      146: int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val)
      148:     return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);
      151: int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val)
      153:     return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);
      156: int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val)
      158:     return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);
      161: int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val)
      163:     return sysfs_powercap_get64_val(zone, GET_POWER_UW, val);
    [all …]
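Each getter above is a thin wrapper that builds a sysfs path from the zone's name and parses a single u64 out of the attribute file. A minimal userspace sketch of that pattern, independent of cpupower's own helpers (the /sys/class/powercap layout and the intel-rapl:0/energy_uj attribute are real on Intel RAPL systems, but treat the exact zone name as an example; error handling is simplified):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Read one u64 attribute file, e.g.
     * /sys/class/powercap/intel-rapl:0/energy_uj */
    static int read_sysfs_u64(const char *zone, const char *attr, uint64_t *val)
    {
        char path[256];
        FILE *f;
        int ok;

        snprintf(path, sizeof(path), "/sys/class/powercap/%s/%s", zone, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        ok = (fscanf(f, "%" SCNu64, val) == 1);
        fclose(f);
        return ok ? 0 : -1;
    }

    int main(void)
    {
        uint64_t uj;

        if (read_sysfs_u64("intel-rapl:0", "energy_uj", &uj) == 0)
            printf("package energy: %" PRIu64 " uJ\n", uj);
        return 0;
    }

Using snprintf() rather than the snippet's strcat() bounds the path construction, which is the usual hardening when the zone name comes from outside the program.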
|
/linux/drivers/block/
zloop.c (all matches in zloop_update_seq_zone()):
    158: struct zloop_zone *zone = &zlo->zones[zone_no];
    164: lockdep_assert_held(&zone->lock);
    166: ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
    170: set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
    187: spin_lock_irqsave(&zone->wp_lock, flags);
    189: zone->cond = BLK_ZONE_COND_EMPTY;
    190: zone->wp = zone->start;
    192: zone->cond = BLK_ZONE_COND_FULL;
    193: zone->wp = ULLONG_MAX;
    195: zone->cond = BLK_ZONE_COND_CLOSED;
    [all …]
|
/linux/drivers/md/dm-vdo/indexer/
index.c:
    in is_zone_chapter_sparse():
       78: static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter)
       80:     return uds_is_chapter_sparse(zone->index->volume->geometry,
       81:                                  zone->oldest_virtual_chapter,
       82:                                  zone->newest_virtual_chapter, virtual_chapter);
    in launch_zone_message():
       85: static int launch_zone_message(struct uds_zone_message message, unsigned int zone,
       97:     request->zone_number = zone;
    in enqueue_barrier_messages():
      110:     unsigned int zone;
      112:     for (zone = 0; zone < index->zone_count; zone++) {
      113:         int result = launch_zone_message(message, zone, index);
    in triage_index_request():
      127:     struct index_zone *zone;
    [all …]
|
/linux/include/net/netfilter/
nf_conntrack_zones.h:
    in nf_ct_zone():
       12:     return &ct->zone;
    in nf_ct_zone_init():
       19: nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
       21:     zone->id = id;
       22:     zone->flags = flags;
       23:     zone->dir = dir;
       25:     return zone;
    in nf_ct_zone_tmpl():
       36:     if (tmpl->zone.flags & NF_CT_FLAG_MARK)
       37:         return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
    in nf_ct_zone_add():
       43:                const struct nf_conntrack_zone *zone)
       46:     ct->zone = *zone;
    [all …]
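nf_ct_zone_init() follows a common kernel idiom: fill a caller-provided struct and return the same pointer, so the call can be used directly as an argument expression, as nf_ct_zone_tmpl() does at line 37. A standalone illustration of the idiom (field layout mirrors the snippet; the types and values are illustrative, not the netfilter ABI):

    #include <stdint.h>
    #include <stdio.h>

    struct ct_zone {
        uint16_t id;
        uint8_t  flags;
        uint8_t  dir;
    };

    /* Initialize in place and return the same pointer, so the result can
     * feed straight into another call without a named temporary. */
    static struct ct_zone *ct_zone_init(struct ct_zone *zone,
                                        uint16_t id, uint8_t dir, uint8_t flags)
    {
        zone->id = id;
        zone->flags = flags;
        zone->dir = dir;
        return zone;
    }

    /* Copy by value into the owning object, as nf_ct_zone_add() does. */
    static void ct_zone_add(struct ct_zone *dst, const struct ct_zone *zone)
    {
        *dst = *zone;
    }

    int main(void)
    {
        struct ct_zone tmp, ct = { 0 };

        /* Build the zone and consume it in one expression. */
        ct_zone_add(&ct, ct_zone_init(&tmp, 42, 0, 0));
        printf("zone id %u\n", ct.id);
        return 0;
    }

The caller owns the storage (here a stack temporary), so the helper allocates nothing and cannot fail, which is why it can return the pointer unconditionally.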
|
/linux/include/linux/
mmzone.h:
      150:     NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
      739:  * zone lock contention and keep cache-hot pages reusing.
      828:  * faulted, they come from the right zone right away. However, it is
      832:  * to a different zone. When migration fails - pinning fails.
      853:  * on different platforms may end up in a movable zone. ZERO_PAGE(0)
      856:  * memory to the MOVABLE zone, the vmemmap pages are also placed in
      857:  * such zone. Such pages cannot be really moved around as they are
      879: struct zone {
      882:     /* zone watermarks, access with *_wmark_pages(zone) macro */
    struct zone members matched: nr_free_highatomic 892; zone_pgdat 903; per_cpu_zonestats 905;
    zone_start_pfn 923; pageblock_flags 926; name 983; unaccepted_pages 1005;
    unaccepted_cleanup 1009; flags 1010; lock 1013; trylock_free_pages 1016;
    percpu_drift_mark 1026; compact_cached_free_pfn 1030; compact_cached_migrate_pfn 1032;
    compact_init_migrate_pfn 1033; compact_init_free_pfn 1034; compact_considered 1044;
    compact_defer_shift 1045; compact_order_failed 1069; vm_numa_event 1070;
    other definitions matched: zone_managed_pages() 1103; zone_cma_pages() 1108;
    zone_end_pfn() 1117; zone_spans_pfn() 1122; zone_is_initialized() 1127; zone_is_empty() 1132;
    zone_intersects() 1272; struct zoneref member zone 1311; zone_idx() 1577;
    zone_is_zone_device() 1580, 1585; managed_zone() 1597; populated_zone() 1603;
    zone_to_nid() 1609, 1619; zone_set_nid() 1614, 1624; is_highmem() 1646; for_each_zone() 1695;
    for_each_populated_zone() 1700; for_each_zone_zonelist_nodemask() 1787;
    for_next_zone_zonelist_nodemask() 1793; for_each_zone_zonelist() 1809;
    [all …]
vmstat.h:
    in zone_numa_event_add():
      142: static inline void zone_numa_event_add(long x, struct zone *zone,
      145:     atomic_long_add(x, &zone->vm_numa_event[item]);
    in zone_numa_event_state():
      149: static inline unsigned long zone_numa_event_state(struct zone *zone,
      152:     return atomic_long_read(&zone->vm_numa_event[item]);
    in zone_page_state_add():
      162: static inline void zone_page_state_add(long x, struct zone *zone,
      165:     atomic_long_add(x, &zone->vm_stat[item]);
      204: static inline unsigned long zone_page_state(struct zone *zone, …
    other definitions matched: zone_page_state_snapshot() 221; __count_numa_event() 240;
    __count_numa_events() 248; __mod_zone_page_state() 312; __inc_zone_state() 335;
    __dec_zone_state() 347; drain_zonestat() 407;
    [all …]
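zone_page_state_add() and its readers are thin wrappers over an array of atomic counters indexed by stat item: writers add signed deltas, readers load and (in the kernel's SMP reader) clamp transiently negative sums to zero. A standalone sketch of that counter pattern using C11 atomics (enum names and the clamp placement are illustrative, not the kernel's exact definitions):

    #include <stdatomic.h>
    #include <stdio.h>

    enum stat_item { NR_FREE_PAGES, NR_ZONE_ACTIVE, NR_STAT_ITEMS };

    struct zone_stats {
        atomic_long vm_stat[NR_STAT_ITEMS];
    };

    /* Writers apply signed deltas; per-CPU batching in the kernel means
     * the global sum can dip below zero transiently. */
    static void state_add(long x, struct zone_stats *zs, enum stat_item item)
    {
        atomic_fetch_add(&zs->vm_stat[item], x);
    }

    /* Readers clamp negative transients so callers never see "-3 pages". */
    static long state_read(struct zone_stats *zs, enum stat_item item)
    {
        long v = atomic_load(&zs->vm_stat[item]);

        return v < 0 ? 0 : v;
    }

    int main(void)
    {
        struct zone_stats zs = { 0 };

        state_add(8, &zs, NR_FREE_PAGES);
        state_add(-3, &zs, NR_FREE_PAGES);
        printf("free pages: %ld\n", state_read(&zs, NR_FREE_PAGES));
        return 0;
    }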
compaction.h:
     90: extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
     91: extern int fragmentation_index(struct zone *zone, unsigned int order);
     97: extern bool compaction_suitable(struct zone *zone, int order,
    100: extern void compaction_defer_reset(struct zone *zone, int order,
    115: static inline bool compaction_suitable(struct zone *zone, int order,
|
/linux/fs/adfs/
map.c:
    in scan_map():
      159: static int scan_map(struct adfs_sb_info *asb, unsigned int zone,
      166:     dm = asb->s_map + zone;
      167:     zone = asb->s_map_size;
      168:     dm_end = asb->s_map + zone;
      179:     } while (--zone > 0);
    in adfs_map_statfs():
      202:     unsigned int zone;
      205:     zone = asb->s_map_size;
      209:     } while (--zone > 0);
    in adfs_map_lookup():
      220:     unsigned int zone, mapoff;
      228:     zone = asb->s_map_size >> 1;
    [all …]
|
/linux/virt/kvm/
coalesced_mmio.c:
    in coalesced_mmio_in_range():
       36:     if (addr < dev->zone.addr)
       38:     if (addr + len > dev->zone.addr + dev->zone.size)
    in coalesced_mmio_write():
       74:     ring->coalesced_mmio[insert].pio = dev->zone.pio;
    in kvm_vm_ioctl_register_coalesced_mmio():
      123:                struct kvm_coalesced_mmio_zone *zone)
      128:     if (zone->pio != 1 && zone->pio != 0)
      138:     dev->zone = *zone;
      142:                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
      143:                zone->addr, zone->size, &dev->dev);
    in kvm_vm_ioctl_unregister_coalesced_mmio():
      159:                struct kvm_coalesced_mmio_zone *zone)
      164:     if (zone->pio != 1 && zone->pio != 0)
    [all …]
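coalesced_mmio_in_range() checks that the access [addr, addr + len) lies entirely within the registered zone. A standalone sketch of that containment test; note this version rearranges the comparison to avoid integer wrap on addr + len, which is a hardening tweak of mine, not a claim about the kernel's exact form (the snippet compares addr + len directly):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mmio_zone {
        uint64_t addr;
        uint64_t size;
    };

    /* True if [addr, addr + len) lies entirely inside the zone.  Comparing
     * offsets against remaining space avoids overflow in addr + len,
     * assuming the zone itself does not wrap the address space. */
    static bool in_range(const struct mmio_zone *z, uint64_t addr, uint64_t len)
    {
        if (addr < z->addr)
            return false;
        if (len > z->size || addr - z->addr > z->size - len)
            return false;
        return true;
    }

    int main(void)
    {
        struct mmio_zone z = { .addr = 0x1000, .size = 0x100 };

        printf("%d\n", in_range(&z, 0x1000, 8));   /* 1: fully inside */
        printf("%d\n", in_range(&z, 0x10f8, 16));  /* 0: crosses the end */
        return 0;
    }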
|
/linux/drivers/net/ethernet/mellanox/mlx4/
alloc.c (all matches in mlx4_zone_add_one()):
    250: struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
    252: if (NULL == zone)
    255: zone->flags = flags;
    256: zone->bitmap = bitmap;
    257: zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
    258: zone->priority = priority;
    259: zone->offset = offset;
    263: zone->uid = zone_alloc->last_uid++;
    264: zone->allocator = zone_alloc;
    274: list_add_tail(&zone->prio_list, &it->prio_list);
    [all …]
|
/linux/include/trace/events/
compaction.h:
    194:     TP_PROTO(struct zone *zone,
    198:     TP_ARGS(zone, order, ret),
    208:         __entry->nid = zone_to_nid(zone);
    209:         __entry->idx = zone_idx(zone);
    223:     TP_PROTO(struct zone *zone,
    227:     TP_ARGS(zone, order, ret)
    232:     TP_PROTO(struct zone *zone,
    236:     TP_ARGS(zone, order, ret)
    241:     TP_PROTO(struct zone *zone, int order),
    243:     TP_ARGS(zone, order),
    [all …]
|
/linux/drivers/md/
dm-zoned-target.c:
     21:     struct dm_zone *zone;    /* struct member */
    in dmz_bio_endio():
     86:     struct dm_zone *zone = bioctx->zone;
     88:     if (zone) {
     91:             dmz_is_seq(zone))
     92:             set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
     93:         dmz_deactivate_zone(zone);
    in dmz_submit_bio():
    116: static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
    122:     struct dmz_dev *dev = zone->dev;
    134:     dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
    144:     if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
    [all …]
|
/linux/Documentation/filesystems/
zonefs.rst:
     10: zonefs is a very simple file system exposing each zone of a zoned block device
     24: by allowing SSTables to be stored in a zone file similarly to a regular file
     26: of the higher level construct "one file is one zone" can help reducing the
     34: space that is divided into zones. A zone is a group of consecutive LBAs and all
     41: sequentially. Each sequential zone has a write pointer maintained by the
     43: to the device. As a result of this write constraint, LBAs in a sequential zone
     45: command (zone reset) before rewriting.
     61: representing zones are grouped by zone type, which are themselves represented
     62: by sub-directories. This file structure is built entirely using zone information
     71: mount, zonefs uses blkdev_report_zones() to obtain the device zone configuration
    [all …]
|