Searched refs:zone (Results 1 – 25 of 253) sorted by relevance

/linux/drivers/md/dm-vdo/
logical-zone.c
55 struct logical_zone *zone = &zones->zones[zone_number]; in initialize_zone() local
58 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations); in initialize_zone()
63 zone->next = &zones->zones[zone_number + 1]; in initialize_zone()
65 vdo_initialize_completion(&zone->completion, vdo, in initialize_zone()
67 zone->zones = zones; in initialize_zone()
68 zone->zone_number = zone_number; in initialize_zone()
69 zone->thread_id = vdo->thread_config.logical_threads[zone_number]; in initialize_zone()
70 zone->block_map_zone = &vdo->block_map->zones[zone_number]; in initialize_zone()
71 INIT_LIST_HEAD(&zone->write_vios); in initialize_zone()
72 vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION); in initialize_zone()
[all …]
block-map.c
60 struct block_map_zone *zone; member
98 struct block_map_zone *zone; member
200 info->vio->completion.callback_thread_id = cache->zone->thread_id; in initialize_info()
251 VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), in assert_on_cache_thread()
253 function_name, cache->zone->thread_id, thread_id); in assert_on_cache_thread()
259 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), in assert_io_allowed()
637 static void check_for_drain_complete(struct block_map_zone *zone) in check_for_drain_complete() argument
639 if (vdo_is_state_draining(&zone->state) && in check_for_drain_complete()
640 (zone->active_lookups == 0) && in check_for_drain_complete()
641 !vdo_waitq_has_waiters(&zone->flush_waiters) && in check_for_drain_complete()
[all …]
physical-zone.c
330 struct physical_zone *zone = &zones->zones[zone_number]; in initialize_zone() local
332 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations); in initialize_zone()
336 result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool); in initialize_zone()
338 vdo_int_map_free(zone->pbn_operations); in initialize_zone()
342 zone->zone_number = zone_number; in initialize_zone()
343 zone->thread_id = vdo->thread_config.physical_threads[zone_number]; in initialize_zone()
344 zone->allocator = &vdo->depot->allocators[zone_number]; in initialize_zone()
345 zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count]; in initialize_zone()
346 result = vdo_make_default_thread(vdo, zone->thread_id); in initialize_zone()
348 free_pbn_lock_pool(vdo_forget(zone->lock_pool)); in initialize_zone()
[all …]
dedupe.c
321 static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name) in assert_in_hash_zone() argument
323 VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), in assert_in_hash_zone()
332 static inline bool change_timer_state(struct hash_zone *zone, int old, int new) in change_timer_state() argument
334 return (atomic_cmpxchg(&zone->timer_state, old, new) == old); in change_timer_state()
342 static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock) in return_hash_lock_to_pool() argument
348 list_add_tail(&lock->pool_node, &zone->lock_pool); in return_hash_lock_to_pool()
694 vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn, in unlock_duplicate_pbn()
718 struct hash_zone *zone = context->zone; in release_context() local
720 WRITE_ONCE(zone->active, zone->active - 1); in release_context()
721 list_move(&context->list_entry, &zone->available); in release_context()
[all …]
/linux/fs/pstore/
zone.c
160 static inline int buffer_datalen(struct pstore_zone *zone) in buffer_datalen() argument
162 return atomic_read(&zone->buffer->datalen); in buffer_datalen()
165 static inline int buffer_start(struct pstore_zone *zone) in buffer_start() argument
167 return atomic_read(&zone->buffer->start); in buffer_start()
175 static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf, in psz_zone_read_buffer() argument
178 if (!buf || !zone || !zone->buffer) in psz_zone_read_buffer()
180 if (off > zone->buffer_size) in psz_zone_read_buffer()
182 len = min_t(size_t, len, zone->buffer_size - off); in psz_zone_read_buffer()
183 memcpy(buf, zone->buffer->data + off, len); in psz_zone_read_buffer()
187 static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf, in psz_zone_read_oldbuf() argument
[all …]
/linux/mm/
page_alloc.c
294 static bool cond_accept_memory(struct zone *zone, unsigned int order,
320 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
322 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
330 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
578 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
586 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
587 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
588 sp = zone->spanned_pages; in page_outside_zone_boundaries()
589 ret = !zone_spans_pfn(zone, pfn); in page_outside_zone_boundaries()
590 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
[all …]
show_mem.c
26 static inline void show_node(struct zone *zone) in show_node() argument
29 printk("Node %d ", zone_to_nid(zone)); in show_node()
38 struct zone *zone; in si_mem_available() local
40 for_each_zone(zone) in si_mem_available()
41 wmark_low += low_wmark_pages(zone); in si_mem_available()
98 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
99 managed_pages += zone_managed_pages(zone); in si_meminfo_node()
100 if (is_highmem(zone)) { in si_meminfo_node()
101 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
102 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
[all …]
compaction.c
126 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
128 zone->compact_considered = 0; in defer_compaction()
129 zone->compact_defer_shift++; in defer_compaction()
131 if (order < zone->compact_order_failed) in defer_compaction()
132 zone->compact_order_failed = order; in defer_compaction()
134 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
135 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
137 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
141 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
143 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
[all …]
vmstat.c
40 static void zero_zone_numa_counters(struct zone *zone) in zero_zone_numa_counters() argument
45 atomic_long_set(&zone->vm_numa_event[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
56 struct zone *zone; in zero_zones_numa_counters() local
58 for_each_populated_zone(zone) in zero_zones_numa_counters()
59 zero_zone_numa_counters(zone); in zero_zones_numa_counters()
172 static void fold_vm_zone_numa_events(struct zone *zone) in fold_vm_zone_numa_events() argument
181 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in fold_vm_zone_numa_events()
187 zone_numa_event_add(zone_numa_events[item], zone, item); in fold_vm_zone_numa_events()
192 struct zone *zone; in fold_vm_numa_events() local
[all …]
mm_init.c
69 struct zone *zone; in mminit_verify_zonelist() local
80 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
81 if (!populated_zone(zone)) in mminit_verify_zonelist()
87 zone->name); in mminit_verify_zonelist()
90 for_each_zone_zonelist(zone, z, zonelist, zoneid) in mminit_verify_zonelist()
91 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
582 unsigned long zone, int nid) in __init_single_page() argument
585 set_page_links(page, zone, nid, pfn); in __init_single_page()
594 if (!is_highmem_idx(zone)) in __init_single_page()
680 struct zone *zone = &pgdat->node_zones[zid]; in __init_page_from_nid() local
[all …]
memory_hotplug.c
434 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
445 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
455 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
470 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
479 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
483 int nid = zone_to_nid(zone); in shrink_zone_span()
485 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
492 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
493 zone_end_pfn(zone)); in shrink_zone_span()
495 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
[all …]
page_reporting.c
146 page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, in page_reporting_cycle() argument
150 struct free_area *area = &zone->free_area[order]; in page_reporting_cycle()
164 spin_lock_irq(&zone->lock); in page_reporting_cycle()
222 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
234 spin_lock_irq(&zone->lock); in page_reporting_cycle()
254 spin_unlock_irq(&zone->lock); in page_reporting_cycle()
261 struct scatterlist *sgl, struct zone *zone) in page_reporting_process_zone() argument
268 watermark = low_wmark_pages(zone) + in page_reporting_process_zone()
275 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in page_reporting_process_zone()
285 err = page_reporting_cycle(prdev, zone, order, mt, in page_reporting_process_zone()
[all …]
/linux/tools/power/cpupower/lib/
powercap.c
124 static int sysfs_powercap_get64_val(struct powercap_zone *zone, in sysfs_powercap_get64_val() argument
132 strcat(file, zone->sys_name); in sysfs_powercap_get64_val()
146 int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val) in powercap_get_max_energy_range_uj() argument
148 return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val); in powercap_get_max_energy_range_uj()
151 int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val) in powercap_get_energy_uj() argument
153 return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val); in powercap_get_energy_uj()
156 int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val) in powercap_get_max_power_range_uw() argument
158 return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val); in powercap_get_max_power_range_uw()
161 int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val) in powercap_get_power_uw() argument
163 return sysfs_powercap_get64_val(zone, GET_POWER_UW, val); in powercap_get_power_uw()
[all …]
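
The powercap getters above all funnel through sysfs_powercap_get64_val(), which reads one 64-bit value out of the zone's sysfs directory. A minimal usage sketch, assuming a struct powercap_zone obtained from the library's zone enumeration (not shown in these results) and assuming the functions return 0 on success; print_zone_energy() is a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

struct powercap_zone;	/* opaque here; defined in cpupower's powercap.h */

/* Prototypes as they appear in the search hits above. */
int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val);
int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val);

/* Print a zone's current energy counter and the range it wraps at. */
static void print_zone_energy(struct powercap_zone *zone)
{
	uint64_t energy, range;

	if (powercap_get_energy_uj(zone, &energy) ||
	    powercap_get_max_energy_range_uj(zone, &range))
		return;		/* assumed non-zero on failure */

	printf("energy: %llu uJ of %llu uJ range\n",
	       (unsigned long long)energy, (unsigned long long)range);
}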
/linux/drivers/md/dm-vdo/indexer/
index.c
78 static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter) in is_zone_chapter_sparse() argument
80 return uds_is_chapter_sparse(zone->index->volume->geometry, in is_zone_chapter_sparse()
81 zone->oldest_virtual_chapter, in is_zone_chapter_sparse()
82 zone->newest_virtual_chapter, virtual_chapter); in is_zone_chapter_sparse()
85 static int launch_zone_message(struct uds_zone_message message, unsigned int zone, in launch_zone_message() argument
97 request->zone_number = zone; in launch_zone_message()
110 unsigned int zone; in enqueue_barrier_messages() local
112 for (zone = 0; zone < index->zone_count; zone++) { in enqueue_barrier_messages()
113 int result = launch_zone_message(message, zone, index); in enqueue_barrier_messages()
127 struct index_zone *zone; in triage_index_request() local
[all …]
/linux/include/linux/
mmzone.h
879 struct zone { struct
1077 static inline unsigned long wmark_pages(const struct zone *z, in wmark_pages()
1083 static inline unsigned long min_wmark_pages(const struct zone *z) in min_wmark_pages()
1088 static inline unsigned long low_wmark_pages(const struct zone *z) in low_wmark_pages()
1093 static inline unsigned long high_wmark_pages(const struct zone *z) in high_wmark_pages()
1098 static inline unsigned long promo_wmark_pages(const struct zone *z) in promo_wmark_pages()
1103 static inline unsigned long zone_managed_pages(const struct zone *zone) in zone_managed_pages() argument
1105 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
1108 static inline unsigned long zone_cma_pages(struct zone *zone) in zone_cma_pages() argument
1111 return zone->cma_pages; in zone_cma_pages()
[all …]
vmstat.h
142 static inline void zone_numa_event_add(long x, struct zone *zone, in zone_numa_event_add() argument
145 atomic_long_add(x, &zone->vm_numa_event[item]); in zone_numa_event_add()
149 static inline unsigned long zone_numa_event_state(struct zone *zone, in zone_numa_event_state() argument
152 return atomic_long_read(&zone->vm_numa_event[item]); in zone_numa_event_state()
162 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
165 atomic_long_add(x, &zone->vm_stat[item]); in zone_page_state_add()
204 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
207 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state()
221 static inline unsigned long zone_page_state_snapshot(struct zone *zone, in zone_page_state_snapshot() argument
224 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state_snapshot()
[all …]
compaction.h
90 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
91 extern int fragmentation_index(struct zone *zone, unsigned int order);
97 extern bool compaction_suitable(struct zone *zone, int order,
100 extern void compaction_defer_reset(struct zone *zone, int order,
115 static inline bool compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
/linux/include/net/netfilter/
nf_conntrack_zones.h
12 return &ct->zone; in nf_ct_zone()
19 nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags) in nf_ct_zone_init() argument
21 zone->id = id; in nf_ct_zone_init()
22 zone->flags = flags; in nf_ct_zone_init()
23 zone->dir = dir; in nf_ct_zone_init()
25 return zone; in nf_ct_zone_init()
36 if (tmpl->zone.flags & NF_CT_FLAG_MARK) in nf_ct_zone_tmpl()
37 return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0); in nf_ct_zone_tmpl()
43 const struct nf_conntrack_zone *zone) in nf_ct_zone_add() argument
46 ct->zone = *zone; in nf_ct_zone_add()
[all …]
/linux/fs/adfs/
map.c
159 static int scan_map(struct adfs_sb_info *asb, unsigned int zone, in scan_map() argument
166 dm = asb->s_map + zone; in scan_map()
167 zone = asb->s_map_size; in scan_map()
168 dm_end = asb->s_map + zone; in scan_map()
179 } while (--zone > 0); in scan_map()
202 unsigned int zone; in adfs_map_statfs() local
205 zone = asb->s_map_size; in adfs_map_statfs()
209 } while (--zone > 0); in adfs_map_statfs()
220 unsigned int zone, mapoff; in adfs_map_lookup() local
228 zone = asb->s_map_size >> 1; in adfs_map_lookup()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx4/
alloc.c
250 struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); in mlx4_zone_add_one() local
252 if (NULL == zone) in mlx4_zone_add_one()
255 zone->flags = flags; in mlx4_zone_add_one()
256 zone->bitmap = bitmap; in mlx4_zone_add_one()
257 zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; in mlx4_zone_add_one()
258 zone->priority = priority; in mlx4_zone_add_one()
259 zone->offset = offset; in mlx4_zone_add_one()
263 zone->uid = zone_alloc->last_uid++; in mlx4_zone_add_one()
264 zone->allocator = zone_alloc; in mlx4_zone_add_one()
274 list_add_tail(&zone->prio_list, &it->prio_list); in mlx4_zone_add_one()
[all …]
/linux/virt/kvm/
coalesced_mmio.c
36 if (addr < dev->zone.addr) in coalesced_mmio_in_range()
38 if (addr + len > dev->zone.addr + dev->zone.size) in coalesced_mmio_in_range()
74 ring->coalesced_mmio[insert].pio = dev->zone.pio; in coalesced_mmio_write()
123 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_register_coalesced_mmio() argument
128 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_register_coalesced_mmio()
138 dev->zone = *zone; in kvm_vm_ioctl_register_coalesced_mmio()
142 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, in kvm_vm_ioctl_register_coalesced_mmio()
143 zone->addr, zone->size, &dev->dev); in kvm_vm_ioctl_register_coalesced_mmio()
159 struct kvm_coalesced_mmio_zone *zone) in kvm_vm_ioctl_unregister_coalesced_mmio() argument
164 if (zone->pio != 1 && zone->pio != 0) in kvm_vm_ioctl_unregister_coalesced_mmio()
[all …]
/linux/include/trace/events/
compaction.h
194 TP_PROTO(struct zone *zone,
198 TP_ARGS(zone, order, ret),
208 __entry->nid = zone_to_nid(zone);
209 __entry->idx = zone_idx(zone);
223 TP_PROTO(struct zone *zone,
227 TP_ARGS(zone, order, ret)
232 TP_PROTO(struct zone *zone,
236 TP_ARGS(zone, order, ret)
241 TP_PROTO(struct zone *zone, int order),
243 TP_ARGS(zone, order),
[all …]
/linux/drivers/md/
dm-zoned-target.c
21 struct dm_zone *zone; member
86 struct dm_zone *zone = bioctx->zone; in dmz_bio_endio() local
88 if (zone) { in dmz_bio_endio()
91 dmz_is_seq(zone)) in dmz_bio_endio()
92 set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); in dmz_bio_endio()
93 dmz_deactivate_zone(zone); in dmz_bio_endio()
116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, in dmz_submit_bio() argument
122 struct dmz_dev *dev = zone->dev; in dmz_submit_bio()
134 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); in dmz_submit_bio()
144 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) in dmz_submit_bio()
[all …]
/linux/Documentation/filesystems/
zonefs.rst
10 zonefs is a very simple file system exposing each zone of a zoned block device
24 by allowing SSTables to be stored in a zone file similarly to a regular file
26 of the higher level construct "one file is one zone" can help reducing the
34 space that is divided into zones. A zone is a group of consecutive LBAs and all
41 sequentially. Each sequential zone has a write pointer maintained by the
43 to the device. As a result of this write constraint, LBAs in a sequential zone
45 command (zone reset) before rewriting.
61 representing zones are grouped by zone type, which are themselves represented
62 by sub-directories. This file structure is built entirely using zone information
71 mount, zonefs uses blkdev_report_zones() to obtain the device zone configuration
[all …]
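
The zonefs excerpt above describes the zone model of zoned block devices: a zone is a range of consecutive LBAs, and each sequential zone carries a device-maintained write pointer. As a hedged illustration (not part of these search results), user space can inspect that layout directly with the BLKREPORTZONE ioctl from <linux/blkzoned.h>, whose kernel side lives in blk-zoned.c below; the device path and zone count here are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

#define NR_ZONES 8	/* how many zones to ask for in one call */

int main(void)
{
	struct blk_zone_report *rep;
	unsigned int i;
	int fd;

	fd = open("/dev/nullb0", O_RDONLY);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The report header is followed by an array of struct blk_zone. */
	rep = calloc(1, sizeof(*rep) + NR_ZONES * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;		/* report from the first zone */
	rep->nr_zones = NR_ZONES;	/* capacity of the zones[] array */

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	/* The kernel rewrites nr_zones to the count actually reported. */
	for (i = 0; i < rep->nr_zones; i++) {
		struct blk_zone *z = &rep->zones[i];

		printf("zone %u: start %llu len %llu wp %llu cond %u\n", i,
		       (unsigned long long)z->start,
		       (unsigned long long)z->len,
		       (unsigned long long)z->wp, z->cond);
	}

	free(rep);
	close(fd);
	return 0;
}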
/linux/block/
blk-zoned.c
349 static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx, in blkdev_copy_zone_to_user() argument
354 if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone))) in blkdev_copy_zone_to_user()
490 static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone) in disk_zone_is_last() argument
492 return zone->start + zone->len >= get_capacity(disk); in disk_zone_is_last()
801 static unsigned int blk_zone_wp_offset(struct blk_zone *zone) in blk_zone_wp_offset() argument
803 switch (zone->cond) { in blk_zone_wp_offset()
808 return zone->wp - zone->start; in blk_zone_wp_offset()
825 struct blk_zone *zone) in disk_zone_wplug_sync_wp_offset() argument
828 unsigned int wp_offset = blk_zone_wp_offset(zone); in disk_zone_wplug_sync_wp_offset()
830 zwplug = disk_get_zone_wplug(disk, zone->start); in disk_zone_wplug_sync_wp_offset()
[all …]
