/linux/drivers/md/dm-vdo/

logical-zone.c

    #include "logical-zone.h"
    #include "physical-zone.h"

    /**
     * initialize_zone() - Initialize a logical zone.
     * @zones: The logical_zones to which this zone belongs.
     */
        struct logical_zone *zone = &zones->zones[zone_number];

        result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
        …
        zone->next = &zones->zones[zone_number + 1];

        vdo_initialize_completion(&zone->completion, vdo, …
        zone->zones = zones;
        zone->zone_number = zone_number;
    [all …]

/linux/fs/pstore/

zone.c

    /*
     * struct psz_buffer - header of zone to flush to storage
     * …
     * @data: zone data.
     */

    /*
     * @off: zone offset of storage
     * @type: front-end type for this zone
     * @name: front-end name for this zone
     * @buffer: pointer to data buffer managed by this zone
     * …
     * @should_recover: whether this zone should recover from storage
     * …
     * zone structure in memory.
     */

    /*
     * struct psz_context - all about running state of pstore/zone
     * …
     * @ppsz: pmsg storage zone
     */
    [all …]

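For orientation, these kernel-doc fields describe one in-memory record per storage zone. A minimal illustrative mirror of just the documented fields (field names follow the comment above; the real structure in fs/pstore/zone.c has more members and uses kernel types):

    #include <stdbool.h>

    /* Illustrative only: one in-memory record per pstore storage zone. */
    struct zone_record_sketch {
        unsigned long long off;  /* zone offset within backing storage */
        int type;                /* front-end type for this zone */
        const char *name;        /* front-end name for this zone */
        void *buffer;            /* data buffer managed by this zone */
        bool should_recover;     /* recover this zone from storage? */
    };
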
/linux/include/uapi/linux/

blkzoned.h

    /*
     * @BLK_ZONE_TYPE_CONVENTIONAL: The zone has no write pointer and can be
     *    written randomly. Zone reset has no effect on the zone.
     * @BLK_ZONE_TYPE_SEQWRITE_REQ: The zone must be written sequentially.
     * @BLK_ZONE_TYPE_SEQWRITE_PREF: The zone can be written non-sequentially.
     */

    /*
     * enum blk_zone_cond - Condition [state] of a zone in a zoned device.
     * @BLK_ZONE_COND_NOT_WP: The zone has no write pointer, it is conventional.
     * @BLK_ZONE_COND_EMPTY: The zone is empty.
     * @BLK_ZONE_COND_IMP_OPEN: The zone is open, but was not explicitly opened
     *    with an OPEN ZONE command.
     * @BLK_ZONE_COND_CLOSED: The zone was [explicitly] closed after writing.
     */
    [all …]

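These types and conditions are what a zone report returns to userspace. A minimal sketch using the BLKREPORTZONE ioctl declared in this header (the device path is an assumption; any zoned block device works):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/blkzoned.h>

    int main(void)
    {
        const unsigned int nr = 8;   /* ask for the first 8 zones */
        struct blk_zone_report *rep;
        unsigned int i;
        int fd = open("/dev/nvme0n1", O_RDONLY); /* assumed zoned device */

        if (fd < 0) { perror("open"); return 1; }
        rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
        if (!rep) return 1;
        rep->sector = 0;             /* report starting at the first zone */
        rep->nr_zones = nr;

        if (ioctl(fd, BLKREPORTZONE, rep) < 0) { perror("BLKREPORTZONE"); return 1; }

        /* The kernel updates nr_zones to the number actually reported. */
        for (i = 0; i < rep->nr_zones; i++) {
            struct blk_zone *z = &rep->zones[i];

            printf("zone %u: start=%llu len=%llu wp=%llu type=%u cond=%u\n",
                   i, (unsigned long long)z->start, (unsigned long long)z->len,
                   (unsigned long long)z->wp, z->type, z->cond);
        }
        free(rep);
        close(fd);
        return 0;
    }
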
/linux/Documentation/filesystems/

zonefs.rst

    ZoneFS - Zone filesystem for Zoned block devices

    zonefs is a very simple file system exposing each zone of a zoned block device …

    … by allowing SSTables to be stored in a zone file similarly to a regular file …
    … of the higher level construct "one file is one zone" can help reduce the …

    … space that is divided into zones. A zone is a group of consecutive LBAs and all …
    … sequentially. Each sequential zone has a write pointer maintained by the …
    … to the device. As a result of this write constraint, LBAs in a sequential zone …
    … command (zone reset) before rewriting.

    … representing zones are grouped by zone type, which are themselves represented
    by sub-directories. This file structure is built entirely using zone information …
    [all …]

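Since a sequential zone file only accepts appends at the write pointer and is reset by truncation, basic use from userspace looks like this sketch (the /mnt mount point and 4096-byte block size are assumptions; per the zonefs documentation, writes to sequential files must be direct I/O):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        void *buf;
        int fd = open("/mnt/seq/0", O_WRONLY | O_APPEND | O_DIRECT);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096)) {
            perror("setup");
            return 1;
        }
        memset(buf, 0, 4096);
        strcpy(buf, "zonefs sequential write");

        /* The write lands at the zone write pointer (the end of the file). */
        if (write(fd, buf, 4096) < 0)
            perror("write");

        /* Truncating a sequential zone file to 0 resets the zone. */
        if (ftruncate(fd, 0) < 0)
            perror("ftruncate");

        free(buf);
        close(fd);
        return 0;
    }
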
/linux/block/

blk-zoned.c

    /*
     * Per-zone write plug.
     * @ref: Zone write plug reference counter. A zone write plug reference is
     *    … A reference is dropped whenever the zone of the zone write plug is reset,
     *    finished and when the zone becomes full (last write BIO to the zone …)
     * @zone_no: The number of the zone the plug is managing.
     * @wp_offset: The zone write pointer location relative to the start of the zone …
     * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
     *
     * Zone write plug flags bits:
     *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
     *    … being executed or the zone write plug bio list is not empty.
     */
    [all …]

/linux/mm/

page_alloc.c

    /*
     * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
     * … shuffle the whole zone).
     */

    /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */

    /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */

    static bool cond_accept_memory(struct zone *zone, unsigned int order);

    _deferred_grow_zone(struct zone *zone, unsigned int order)
    {
        return deferred_grow_zone(zone, order);
    }

    static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)

    static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
    {
        …
        seq = zone_span_seqbegin(zone);
    [all …]

vmstat.c

    /* zero numa counters within a zone */
    static void zero_zone_numa_counters(struct zone *zone)
    {
        …
        atomic_long_set(&zone->vm_numa_event[item], 0);
        …
        per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] …
    }

    static void zero_zones_numa_counters(void)
    {
        struct zone *zone;

        for_each_populated_zone(zone)
            zero_zone_numa_counters(zone);
    }

    /*
     * Manage combined zone based / global counters
     */
    static void fold_vm_zone_numa_events(struct zone *zone)
    {
        …
        pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
    [all …]

compaction.c

    /* … the "fragmentation score" of a node/zone. */

    static void defer_compaction(struct zone *zone, int order)
    {
        zone->compact_considered = 0;
        zone->compact_defer_shift++;

        if (order < zone->compact_order_failed)
            zone->compact_order_failed = order;

        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

        trace_mm_compaction_defer_compaction(zone, order);
    }

    static bool compaction_deferred(struct zone *zone, int order)
    [all …]

memory_hotplug.c

    /* … specifying a zone (MMOP_ONLINE) */
    /* "contig-zones": keep zone contiguous */

    static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, …)
    {
        …
        if (zone != page_zone(pfn_to_page(start_pfn)))
        …
    }

    static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, …)
    {
        …
        if (zone != page_zone(pfn_to_page(pfn)))
        …
    }

    static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, …)
    {
        int nid = zone_to_nid(zone);

        if (zone->zone_start_pfn == start_pfn) {
            /*
             * If the section is the smallest section in the zone, it needs
             * to shrink …
             */
    [all …]

/linux/tools/power/cpupower/lib/

powercap.c

    static int sysfs_powercap_get64_val(struct powercap_zone *zone, …)
    {
        …
        strcat(file, zone->sys_name);
        …
    }

    int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val)
    {
        return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);
    }

    int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val)
    {
        return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);
    }

    int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val)
    {
        return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);
    }

    int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val)
    {
        return sysfs_powercap_get64_val(zone, GET_POWER_UW, val);
    }
    [all …]

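A typical use of these getters is sampling the energy counter and converting to power. A sketch against the cpupower powercap library (it assumes the zone pointer comes from the library's discovery helpers and that the getters return 0 on success):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "powercap.h" /* cpupower library header */

    /* Derive average power over one second from a zone's energy counter. */
    static int print_avg_power(struct powercap_zone *zone)
    {
        uint64_t e0, e1, range;

        if (powercap_get_energy_uj(zone, &e0))
            return -1;
        sleep(1);
        if (powercap_get_energy_uj(zone, &e1))
            return -1;
        if (powercap_get_max_energy_range_uj(zone, &range))
            return -1;

        if (e1 < e0)        /* counter wrapped once */
            e1 += range;
        printf("average power: %.3f W\n", (e1 - e0) / 1e6);
        return 0;
    }
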
/linux/drivers/md/dm-vdo/indexer/

index.c

    /*
     * … get different numbers of records, some zones may fall behind others. Each
     * time a zone fills up … and also informs all other zones that it has closed
     * the chapter. Each other zone will then close …
     *
     * Once every zone has closed the chapter, the chapter writer will commit that
     * chapter to storage.
     *
     * The last zone to close the chapter also removes the oldest chapter from the
     * volume index. … means that those zones will never ask the volume index about
     * it. No zone is allowed to get more than one chapter ahead of any other. If a
     * zone is so far ahead that it tries to close another …
     *
     * … request wants to add a chapter to the sparse cache, it sends a barrier
     * message to each zone during the triage stage that acts as a rendezvous. Once
     * every zone has reached the barrier and paused its operations, the cache
     * membership is changed and each zone is then informed that it …
     *
     * If a sparse cache has only one zone, it will not create a triage queue, but
     * it still needs the …
     */
    [all …]

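The rendezvous described above is the classic barrier pattern: every zone pauses at the barrier, exactly one participant performs the cache membership change, then all resume together. A self-contained illustration with POSIX threads (not VDO's actual implementation; names and the pthread barrier choice are assumptions):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_ZONES 4

    static pthread_barrier_t barrier;

    /* Each "zone" pauses at the rendezvous; the one thread that gets
     * PTHREAD_BARRIER_SERIAL_THREAD performs the membership change. */
    static void *zone_thread(void *arg)
    {
        long zone = (long)arg;

        printf("zone %ld reached the barrier\n", zone);
        if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
            printf("zone %ld updates the sparse cache membership\n", zone);
        pthread_barrier_wait(&barrier); /* everyone resumes together */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NR_ZONES];
        long i;

        pthread_barrier_init(&barrier, NULL, NR_ZONES);
        for (i = 0; i < NR_ZONES; i++)
            pthread_create(&t[i], NULL, zone_thread, (void *)i);
        for (i = 0; i < NR_ZONES; i++)
            pthread_join(t[i], NULL);
        pthread_barrier_destroy(&barrier);
        return 0;
    }
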
/linux/include/linux/

memory_hotplug.h

    struct zone;

    /* Types for controlling the zone type of onlined and offlined memory */

    /* Online the memory. Zone depends, see default_zone_for_pfn(). */

    /*
     * Zone resizing functions
     *
     * Note: any attempt to resize a zone should have pgdat_resize_lock() and
     * zone_span_writelock() both held. This ensures the size of a zone …
     */
    static inline unsigned zone_span_seqbegin(struct zone *zone)
    {
        return read_seqbegin(&zone->span_seqlock);
    }
    static inline int zone_span_seqretry(struct zone *zone, …)
    static inline void zone_span_writelock(struct zone *zone) …
    static inline void zone_span_writeunlock(struct zone *zone) …
    static inline void zone_seqlock_init(struct zone *zone) …

    static inline unsigned zone_span_seqbegin(struct zone *zone) …
    static inline int zone_span_seqretry(struct zone *zone, unsigned iv) …
    static inline void zone_span_writelock(struct zone *zone) …
    static inline void zone_span_writeunlock(struct zone *zone) …
    static inline void zone_seqlock_init(struct zone *zone) …
    static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                                    struct zone *zone, struct memory_group *group) …
    [all …]

powercap.h

    /*
     * … controlled. Each power zone can have one or more constraints.
     * … limits. If disabled, the power zone can only be monitored.
     */

    /**
     * struct powercap_zone_ops - Define power zone callbacks
     * @set_enable: Enable/Disable power zone controls.
     *
     * This structure defines zone callbacks to be implemented by client drivers.
     */

    /**
     * struct powercap_zone - Defines an instance of a power cap zone
     * @name: Power zone name.
     * @control_type_inst: Control type instance for this zone.
     * @ops: Pointer to the zone operation structure.
     * @private_data: Private data pointer, if any, for this zone.
     */
    [all …]

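On the driver side, a client fills in a powercap_zone_ops and registers the zone. A sketch of the shape (the callback names follow this header's kernel-doc; the fake counter and the exact signatures are assumptions to check against <linux/powercap.h>):

    #include <linux/powercap.h>

    static u64 fake_energy_uj;     /* stand-in for a hardware counter */

    static int my_get_energy_uj(struct powercap_zone *zone, u64 *energy)
    {
        *energy = fake_energy_uj;  /* a real driver reads hardware here */
        return 0;
    }

    static int my_set_enable(struct powercap_zone *zone, bool mode)
    {
        /* Enable or disable power limiting for this zone in hardware. */
        return 0;
    }

    static const struct powercap_zone_ops my_zone_ops = {
        .get_energy_uj = my_get_energy_uj,
        .set_enable    = my_set_enable,
    };
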
vmstat.h

    /*
     * Zone and node-based page accounting with per-cpu differentials.
     */
    static inline void zone_numa_event_add(long x, struct zone *zone, … item)
    {
        atomic_long_add(x, &zone->vm_numa_event[item]);
    }

    static inline unsigned long zone_numa_event_state(struct zone *zone, … item)
    {
        return atomic_long_read(&zone->vm_numa_event[item]);
    }

    static inline void zone_page_state_add(long x, struct zone *zone, … item)
    {
        atomic_long_add(x, &zone->vm_stat[item]);
    }

    static inline unsigned long zone_page_state(struct zone *zone, … item)
    {
        long x = atomic_long_read(&zone->vm_stat[item]);
        …
    }

    static inline unsigned long zone_page_state_snapshot(struct zone *zone, …)
    [all …]

/linux/fs/zonefs/

super.c

    /* Get the name of a zone group directory. */

    /* Manage the active zone count. */

    /*
     * If the zone is active, that is, if it is explicitly open or …
     */

    /* The zone is not active. If it was, update the active count */

    /* Manage the active zone count. Called with zi->i_truncate_mutex held. */

    /* Execute a zone management operation. */

    /*
     * With ZNS drives, closing an explicitly open zone that has not been
     * written will change the zone state to "closed", that is, the zone …
     * open operation on other zones if the drive active zone resources
     * are exceeded, make sure that the zone does not remain active by …
     */
    [all …]

/linux/drivers/md/

dm-zoned-target.c

    /*
     * Zone BIO context.
     */
    struct dm_zone *zone;

    static void dmz_bio_endio(…)
    {
        struct dm_zone *zone = bioctx->zone;

        if (zone) {
            if (… dmz_is_seq(zone))
                set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
            dmz_deactivate_zone(zone);
        }
    }

    static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, …)
    {
        struct dmz_dev *dev = zone->dev;
        …
        … dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
    [all …]

dm-zoned-reclaim.c

    /*
     * Align a sequential zone write pointer to chunk_block.
     */
    static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, …)
    {
        struct dmz_dev *dev = zone->dev;
        sector_t wp_block = zone->wp_block;
        …
        … dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), …
        …
        "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
            zone->id, (unsigned long long)wp_block, …
        …
        zone->wp_block = block;
    }

    /* Get a valid region from the source zone */

    /*
     * If we are writing in a sequential zone, we must make sure …
     */
    [all …]

/linux/Documentation/driver-api/thermal/

sysfs-api.rst

    The generic thermal sysfs provides a set of interfaces for thermal zone …

    This how-to focuses on enabling new thermal zone and cooling devices to …
    This solution is platform independent and any type of thermal zone devices …

    The main task of the thermal sysfs driver is to expose thermal zone attributes …
    … inputs from thermal zone attributes (the current temperature and trip point …)

    1.1 thermal zone device interface

    This interface function adds a new thermal zone device (sensor) to the …
    … the thermal zone type.
    … the table of trip points for this thermal zone.
    … thermal zone device call-backs.
    [all …]

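The attributes the thermal sysfs driver exposes land under /sys/class/thermal/. Reading one back from userspace is plain file I/O, as in this sketch (thermal_zone0 is an example; which zones exist is platform dependent):

    #include <stdio.h>

    int main(void)
    {
        /* Thermal zone temperatures are exported in millidegrees Celsius. */
        FILE *f = fopen("/sys/class/thermal/thermal_zone0/temp", "r");
        long mdeg;

        if (!f || fscanf(f, "%ld", &mdeg) != 1) {
            perror("thermal_zone0");
            return 1;
        }
        printf("thermal_zone0: %.1f C\n", mdeg / 1000.0);
        fclose(f);
        return 0;
    }
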
/linux/fs/adfs/

map.c

    /*
     * … zone which contains a bitstream made up of variable sized fragments.
     * … large or fragmented files. The first map zone a fragment starts in …
     * … from any zone on the disk.
     */

    /*
     * return the map bit offset of the fragment frag_id in the zone dm.
     */

    /*
     * Scan the free space map, for this zone, calculating the total …
     */
        /* … exist in this zone. (in scan_free_map()) */

    static int scan_map(struct adfs_sb_info *asb, unsigned int zone, …)
    {
        …
        dm = asb->s_map + zone;
        zone = asb->s_map_size;
        dm_end = asb->s_map + zone;
    [all …]

/linux/Documentation/admin-guide/device-mapper/

dm-zoned.rst

    Data in these zones may be directly mapped to the conventional zone, but
    later moved to a sequential zone so that the conventional zone can be …

    1) The first block of the first conventional zone found contains the …

    … indicates the zone number of the device storing the chunk of data. Each
    mapping entry may also indicate the zone number of a conventional zone used
    to buffer random modifications to the data zone.

    … data chunk, a block is always valid only in the data zone mapping the
    chunk or in the buffer zone of the chunk.

    For a logical chunk mapped to a conventional zone, all write operations
    are processed by directly writing to the zone. If the mapping zone is a …
    [all …]

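The indirection described above is essentially a per-chunk table lookup with an optional buffer zone. An illustrative sketch of that lookup (not dm-zoned's on-disk metadata format; names and the sentinel value are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAP_UNMAPPED UINT32_MAX    /* assumed "no zone" sentinel */

    /* One entry per logical chunk: the data zone holding the chunk, plus an
     * optional conventional zone buffering random writes to it. */
    struct chunk_map_entry {
        uint32_t dzone_id;    /* data zone number */
        uint32_t bzone_id;    /* buffer zone number, or unmapped */
    };

    /* Resolve which zone serves a write to a chunk: the buffer zone if one
     * is assigned, otherwise the data zone itself. */
    static uint32_t zone_for_write(const struct chunk_map_entry *map,
                                   uint64_t chunk)
    {
        const struct chunk_map_entry *e = &map[chunk];

        return e->bzone_id != MAP_UNMAPPED ? e->bzone_id : e->dzone_id;
    }

    int main(void)
    {
        struct chunk_map_entry map[2] = {
            { .dzone_id = 7, .bzone_id = MAP_UNMAPPED },
            { .dzone_id = 9, .bzone_id = 3 },
        };

        printf("chunk 0 -> zone %u, chunk 1 -> zone %u\n",
               zone_for_write(map, 0), zone_for_write(map, 1));
        return 0;
    }
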
/linux/include/trace/events/

compaction.h

    TP_PROTO(struct zone *zone,
         …),
    TP_ARGS(zone, order, ret),

        __entry->nid = zone_to_nid(zone);
        __entry->idx = zone_idx(zone);

    TP_printk("node=%d zone=%-8s order=%d ret=%s", …

    TP_PROTO(struct zone *zone,
         …),
    TP_ARGS(zone, order, ret)

    TP_PROTO(struct zone *zone,
         …),
    TP_ARGS(zone, order, ret)

    TP_PROTO(struct zone *zone, int order),
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx4/

alloc.c

    /* in mlx4_zone_add_one() */
    struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);

    if (NULL == zone)
        …

    zone->flags = flags;
    zone->bitmap = bitmap;
    zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
    zone->priority = priority;
    zone->offset = offset;
    …
    zone->uid = zone_alloc->last_uid++;
    zone->allocator = zone_alloc;
    …
    list_add_tail(&zone->prio_list, &it->prio_list);
    [all …]

/linux/include/net/netfilter/

nf_conntrack_zones.h

        return &ct->zone;        /* in nf_ct_zone() */

    static inline struct nf_conntrack_zone *
    nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
    {
        zone->id = id;
        zone->flags = flags;
        zone->dir = dir;

        return zone;
    }

        /* in nf_ct_zone_tmpl() */
        if (tmpl->zone.flags & NF_CT_FLAG_MARK)
            return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);

    static inline void
    nf_ct_zone_add(struct nf_conn *ct, const struct nf_conntrack_zone *zone)
    {
        ct->zone = *zone;
    }
    [all …]

/linux/Documentation/ABI/testing/

sysfs-class-powercap

    … This status affects every power zone using this "control type".

    What:        /sys/class/powercap/<control type>/<power zone>
    Description:
            A power zone is a single device or a collection of devices, which
            can be independently monitored and controlled. A power zone sysfs
            entry …

    What:        /sys/class/powercap/<control type>/<power zone>/<child power zone>
    Description:
            … power zone for a whole CPU package, each CPU core in it can
            be a child power zone.

    What:        /sys/class/powercap/.../<power zone>/name
    Description:
            Specifies the name of this power zone.

    What:        /sys/class/powercap/.../<power zone>/energy_uj
    [all …]

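Reading these zone attributes is plain sysfs file I/O, as in this sketch (the intel-rapl:0 zone is an example; control types and zone names vary by platform):

    #include <stdio.h>

    int main(void)
    {
        /* Example path: package 0 under the intel-rapl control type. */
        const char *path = "/sys/class/powercap/intel-rapl:0/energy_uj";
        unsigned long long uj;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%llu", &uj) != 1) {
            perror(path);
            return 1;
        }
        printf("%s = %llu microjoules\n", path, uj);
        fclose(f);
        return 0;
    }
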
/linux/Documentation/mm/

balance.rst

    … is, only when needed (i.e., when zone free memory is 0), instead of making it …

    … of incurring the overhead of regular zone balancing.

    … would not be done even when the dma zone was completely empty. 2.2 has …

    In 2.3, zone balancing can be done in one of two ways: depending on the
    zone size (and possibly of the size of lower class zones), we can decide …
    … zone. The good part is, while balancing, we do not need to look at sizes …

    … of a zone _and_ all its lower class zones falls below 1/64th of the
    total memory in the zone and its lower class zones. This fixes the 2.2 …

    Note that if the size of the regular zone is huge compared to the dma zone, …
    … deciding whether to balance the regular zone. The first solution …
    [all …]
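The 1/64th rule amounts to an incremental check while walking zones from lowest class to highest: accumulate free and total pages, and balance once the cumulative free share dips below 1/64. A worked illustration with made-up page counts:

    #include <stdio.h>

    struct zone_info {
        const char *name;
        unsigned long free_pages;
        unsigned long total_pages;
    };

    int main(void)
    {
        /* Lower class zones come first: DMA, then Normal, then HighMem. */
        struct zone_info zones[] = {
            { "DMA",        50,   4096 },  /* 4096/64 = 64, 50 < 64 */
            { "Normal",    900, 126976 },
            { "HighMem", 30000, 131072 },
        };
        unsigned long cum_free = 0, cum_total = 0;
        int i;

        for (i = 0; i < 3; i++) {
            cum_free += zones[i].free_pages;
            cum_total += zones[i].total_pages;
            /* Balance when free memory in this zone and all lower class
             * zones falls below 1/64th of their total memory. */
            printf("%-7s cumulative free %lu / %lu -> %s\n",
                   zones[i].name, cum_free, cum_total,
                   cum_free < cum_total / 64 ? "balance" : "ok");
        }
        return 0;
    }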