/linux/mm/
swap.c
      73  static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,  [in __page_cache_release()]
      89  struct lruvec *lruvec = NULL;  [in page_cache_release(), local]
      92  __page_cache_release(folio, &lruvec, &flags);  [in page_cache_release()]
      93  if (lruvec)  [in page_cache_release()]
      94  unlock_page_lruvec_irqrestore(lruvec, flags);  [in page_cache_release()]
     116  typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
     118  static void lru_add(struct lruvec *lruvec, struct folio *folio)  [in lru_add(), argument]
     154  lruvec_add_folio(lruvec, folio);  [in lru_add()]
     161  struct lruvec *lruvec = NULL;  [in folio_batch_move_lru(), local]
     171  folio_lruvec_relock_irqsave(folio, &lruvec, &flags);  [in folio_batch_move_lru()]
    [all …]
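The swap.c hits outline the batched LRU-move pattern: folio_batch_move_lru() keeps one lruvec locked across a whole batch and only cycles the lock when a folio belongs to a different lruvec. A minimal sketch of that pattern follows; the _sketch name and the reduced body are mine, and the real function additionally manages the folio's LRU flag.

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>

typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void batch_move_lru_sketch(struct folio_batch *fbatch, move_fn_t move_fn)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* Re-locks only when this folio's lruvec differs from
                 * the one currently held. */
                folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
                move_fn(lruvec, folio);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        folio_batch_release(fbatch);
}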
workingset.c
     237  struct lruvec *lruvec;  [in lru_gen_eviction(), local]
     249  lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in lru_gen_eviction()]
     250  lrugen = &lruvec->lrugen;  [in lru_gen_eviction()]
     264  static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec,  [in lru_gen_test_recent(), argument]
     275  *lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in lru_gen_test_recent()]
     277  max_seq = READ_ONCE((*lruvec)->lrugen.max_seq);  [in lru_gen_test_recent()]
     289  struct lruvec *lruvec;  [in lru_gen_refault(), local]
     296  recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset);  [in lru_gen_refault()]
     297  if (lruvec != folio_lruvec(folio))  [in lru_gen_refault()]
     300  mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);  [in lru_gen_refault()]
    [all …]
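The workingset.c hits show the MGLRU refault path: the shadow entry left behind at eviction is decoded back into a memcg/node pair (hence mem_cgroup_lruvec()), and recency is judged against the lruvec's generation counter. A hedged sketch of that recency test, assuming CONFIG_LRU_GEN; the shadow decoding and exact bit packing live in mm/workingset.c and are simplified away here, with evicted_seq standing in for the sequence recovered from the token.

#include <linux/mmzone.h>

static bool lru_gen_recent_sketch(struct lruvec *lruvec,
                                  unsigned long evicted_seq)
{
        unsigned long max_seq = READ_ONCE(lruvec->lrugen.max_seq);

        /* Sequences only grow, so an entry whose generation was
         * recycled more than MAX_NR_GENS ago is no longer recent. */
        return max_seq - evicted_seq < MAX_NR_GENS;
}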
vmscan.c
     411  static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,  [in lruvec_lru_size(), argument]
     418  for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {  [in lruvec_lru_size()]
     420  size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);  [in lruvec_lru_size()]
    1668  static __always_inline void update_lru_sizes(struct lruvec *lruvec,  [in update_lru_sizes(), argument]
    1677  update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);  [in update_lru_sizes()]
    1704  struct lruvec *lruvec, struct list_head *dst,  [in isolate_lru_folios(), argument]
    1708  struct list_head *src = &lruvec->lists[lru];  [in isolate_lru_folios()]
    1793  update_lru_sizes(lruvec, lru, nr_zone_taken);  [in isolate_lru_folios()]
    1827  struct lruvec *lruvec;  [in folio_isolate_lru(), local]
    1830  lruvec = folio_lruvec_lock_irq(folio);  [in folio_isolate_lru()]
    [all …]
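The vmscan.c hits end in folio_isolate_lru(), which combines three lruvec idioms seen throughout this list: claim the folio's LRU flag, pin the folio, then unlink it under the lruvec lock. A condensed sketch under those assumptions (the _sketch name is mine; lruvec_del_folio() also adjusts the LRU size accounting):

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

static bool isolate_one_folio_sketch(struct folio *folio)
{
        struct lruvec *lruvec;

        /* Whoever clears the LRU flag owns the folio's LRU state. */
        if (!folio_test_clear_lru(folio))
                return false;

        folio_get(folio);
        lruvec = folio_lruvec_lock_irq(folio);
        lruvec_del_folio(lruvec, folio);
        unlock_page_lruvec_irq(lruvec);
        return true;
}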
mlock.c
      61  static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)  [in __mlock_folio(), argument]
      65  return lruvec;  [in __mlock_folio()]
      67  lruvec = folio_lruvec_relock_irq(folio, lruvec);  [in __mlock_folio()]
      76  lruvec_del_folio(lruvec, folio);  [in __mlock_folio()]
      78  lruvec_add_folio(lruvec, folio);  [in __mlock_folio()]
      92  lruvec_del_folio(lruvec, folio);  [in __mlock_folio()]
      96  lruvec_add_folio(lruvec, folio);  [in __mlock_folio()]
     100  return lruvec;  [in __mlock_folio()]
     103  static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)  [in __mlock_new_folio(), argument]
     107  lruvec = folio_lruvec_relock_irq(folio, lruvec);  [in __mlock_new_folio()]
    [all …]
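The mlock.c hits show the move idiom: relock to the folio's lruvec, delete the folio, change its flags, and re-add it so lruvec_add_folio() files it on the list matching the new state. A sketch in the shape of __mlock_folio(); the _sketch name is mine and the real code also tracks mlock counts.

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

/* Returns with the lruvec still locked, so a caller processing a batch
 * can pass it straight back in for the next folio. */
static struct lruvec *move_to_unevictable_sketch(struct folio *folio,
                                                 struct lruvec *lruvec)
{
        lruvec = folio_lruvec_relock_irq(folio, lruvec);

        if (folio_test_clear_lru(folio)) {
                lruvec_del_folio(lruvec, folio);
                folio_set_unevictable(folio);
                lruvec_add_folio(lruvec, folio);
                folio_set_lru(folio);
        }
        return lruvec;
}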
mmzone.c
      75  void lruvec_init(struct lruvec *lruvec)  [in lruvec_init(), argument]
      79  memset(lruvec, 0, sizeof(struct lruvec));  [in lruvec_init()]
      80  spin_lock_init(&lruvec->lru_lock);  [in lruvec_init()]
      81  zswap_lruvec_state_init(lruvec);  [in lruvec_init()]
      84  INIT_LIST_HEAD(&lruvec->lists[lru]);  [in lruvec_init()]
      91  list_del(&lruvec->lists[LRU_UNEVICTABLE]);  [in lruvec_init()]
      93  lru_gen_init_lruvec(lruvec);  [in lruvec_init()]
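The mmzone.c hits cover nearly all of lruvec_init(); reassembled here as a sketch for readability (the comment on the poisoned list head paraphrases mm/mmzone.c):

#include <linux/mm_inline.h>
#include <linux/mmzone.h>
#include <linux/zswap.h>

void lruvec_init_sketch(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));
        spin_lock_init(&lruvec->lru_lock);
        zswap_lruvec_state_init(lruvec);

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);

        /* The unevictable list is never scanned; poisoning its head
         * makes stray list operations on it fail fast. */
        list_del(&lruvec->lists[LRU_UNEVICTABLE]);

        lru_gen_init_lruvec(lruvec);
}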
memcontrol.c
     391  unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)  [in lruvec_page_state(), argument]
     398  return node_page_state(lruvec_pgdat(lruvec), idx);  [in lruvec_page_state()]
     404  pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);  [in lruvec_page_state()]
     413  unsigned long lruvec_page_state_local(struct lruvec *lruvec,  [in lruvec_page_state_local(), argument]
     421  return node_page_state(lruvec_pgdat(lruvec), idx);  [in lruvec_page_state_local()]
     427  pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);  [in lruvec_page_state_local()]
     729  static void mod_memcg_lruvec_state(struct lruvec *lruvec,  [in mod_memcg_lruvec_state(), argument]
     741  pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);  [in mod_memcg_lruvec_state()]
     769  void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,  [in mod_lruvec_state(), argument]
     773  mod_node_page_state(lruvec_pgdat(lruvec), idx, val);  [in mod_lruvec_state()]
    [all …]
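The memcontrol.c hits share one dispatch pattern: with memcg disabled, a lruvec stat is simply the node's stat; otherwise the lruvec is embedded in a mem_cgroup_per_node, recovered via container_of(), and the per-cgroup counter is used. A sketch of the read side; read_pn_counter_sketch() is a hypothetical stand-in for the real read of the pn's lruvec_stats.

#include <linux/memcontrol.h>
#include <linux/vmstat.h>

/* Hypothetical stand-in; the real code reads per-cgroup counters. */
static unsigned long read_pn_counter_sketch(struct mem_cgroup_per_node *pn,
                                            enum node_stat_item idx)
{
        return 0;
}

static unsigned long lruvec_page_state_sketch(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return read_pn_counter_sketch(pn, idx);
}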
zswap.c
     657  void zswap_lruvec_state_init(struct lruvec *lruvec)  [in zswap_lruvec_state_init(), argument]
     659  atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);  [in zswap_lruvec_state_init()]
     664  struct lruvec *lruvec;  [in zswap_folio_swapin(), local]
     667  lruvec = folio_lruvec(folio);  [in zswap_folio_swapin()]
     668  atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);  [in zswap_folio_swapin()]
    1212  struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));  [in zswap_shrinker_count(), local]
    1214  &lruvec->zswap_lruvec_state.nr_disk_swapins;  [in zswap_shrinker_count()]
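The zswap.c hits show a small per-lruvec counter: every folio swapped in from disk bumps nr_disk_swapins, and the zswap shrinker later reads it to gauge swapin pressure. A sketch of the producer side, assuming CONFIG_ZSWAP (the _sketch name is mine):

#include <linux/memcontrol.h>

static void note_disk_swapin_sketch(struct folio *folio)
{
        struct lruvec *lruvec = folio_lruvec(folio);

        atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
}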
compaction.c
     842  struct lruvec *lruvec;  [in isolate_migratepages_block(), local]
     844  struct lruvec *locked = NULL;  [in isolate_migratepages_block()]
    1156  lruvec = folio_lruvec(folio);  [in isolate_migratepages_block()]
    1159  if (lruvec != locked) {  [in isolate_migratepages_block()]
    1163  compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);  [in isolate_migratepages_block()]
    1164  locked = lruvec;  [in isolate_migratepages_block()]
    1166  lruvec_memcg_debug(lruvec, folio);  [in isolate_migratepages_block()]
    1200  lruvec_del_folio(lruvec, folio);  [in isolate_migratepages_block()]
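The compaction.c hits show the lock-caching variant of the batching idiom: while scanning many folios, remember which lruvec is currently locked and only cycle the lock on a change. A sketch assuming each folio is LRU-resident and already pinned; compaction itself uses compact_lock_irqsave(), an abort-aware wrapper that this sketch replaces with a plain spin_lock_irqsave().

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

static void isolate_batch_sketch(struct folio **folios, int nr)
{
        struct lruvec *locked = NULL;
        unsigned long flags = 0;
        int i;

        for (i = 0; i < nr; i++) {
                struct folio *folio = folios[i];
                struct lruvec *lruvec = folio_lruvec(folio);

                /* Cycle the lock only on a change of lruvec. */
                if (lruvec != locked) {
                        if (locked)
                                unlock_page_lruvec_irqrestore(locked, flags);
                        spin_lock_irqsave(&lruvec->lru_lock, flags);
                        locked = lruvec;
                }
                lruvec_del_folio(lruvec, folio);  /* off its LRU list */
        }
        if (locked)
                unlock_page_lruvec_irqrestore(locked, flags);
}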
memcontrol-v1.c
    1743  struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));  [in mem_cgroup_node_nr_lru_pages(), local]
    1753  nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);  [in mem_cgroup_node_nr_lru_pages()]
    1755  nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);  [in mem_cgroup_node_nr_lru_pages()]
    1933  anon_cost += mz->lruvec.anon_cost;  [in memcg1_stat_format()]
    1934  file_cost += mz->lruvec.file_cost;  [in memcg1_stat_format()]
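The memcontrol-v1.c hits sum LRU list sizes per node: resolve the memcg's lruvec for the node, then add up the requested lists. A sketch of the hierarchical case; the real function can also use lruvec_page_state_local() for the non-hierarchical counters, as the hits show.

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

static unsigned long node_nr_lru_pages_sketch(struct mem_cgroup *memcg,
                                              int nid, unsigned int lru_mask)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        unsigned long nr = 0;
        enum lru_list lru;

        for_each_lru(lru) {
                if (!(BIT(lru) & lru_mask))
                        continue;
                nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
        }
        return nr;
}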
migrate.c
     671  struct lruvec *old_lruvec, *new_lruvec;  [in __folio_migrate_mapping()]
    2729  struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);  [in migrate_misplaced_folio(), local]
    2743  mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);  [in migrate_misplaced_folio()]
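The migrate.c hits show promotion accounting: once a NUMA-misplaced folio has migrated, the success count is charged to the memcg's lruvec on the destination node. A sketch assuming the folio already sits on its destination node and its memcg binding is stable; PGPROMOTE_SUCCESS depends on CONFIG_NUMA_BALANCING.

#include <linux/memcontrol.h>
#include <linux/vmstat.h>

static void count_promotion_sketch(struct folio *folio, int nr_succeeded)
{
        struct mem_cgroup *memcg = folio_memcg(folio);
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, folio_pgdat(folio));

        mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
}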
huge_memory.c
    3443  struct lruvec *lruvec, struct list_head *list)  [in lru_add_split_folio(), argument]
    3446  lockdep_assert_held(&lruvec->lru_lock);  [in lru_add_split_folio()]
    3787  struct lruvec *lruvec;  [in __folio_freeze_and_split_unmapped(), local]
    3834  lruvec = folio_lruvec_lock(folio);  [in __folio_freeze_and_split_unmapped()]
    3859  lru_add_split_folio(folio, new_folio, lruvec, list);  [in __folio_freeze_and_split_unmapped()]
    3902  unlock_page_lruvec(lruvec);  [in __folio_freeze_and_split_unmapped()]
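The huge_memory.c hits show the lock discipline around a folio split: the folio's lruvec is locked for the duration, the helper that files the new folios asserts the lock via lockdep, and the lock is dropped afterwards. A bare sketch of that shape (the _sketch name and elided body are mine):

#include <linux/memcontrol.h>

static void split_under_lruvec_lock_sketch(struct folio *folio)
{
        struct lruvec *lruvec = folio_lruvec_lock(folio);

        lockdep_assert_held(&lruvec->lru_lock);
        /* ... distribute the split folios onto the LRU or a list ... */
        unlock_page_lruvec(lruvec);
}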
/linux/include/linux/
mm_inline.h
      38  static __always_inline void __update_lru_size(struct lruvec *lruvec,  [in __update_lru_size(), argument]
      42  struct pglist_data *pgdat = lruvec_pgdat(lruvec);  [in __update_lru_size()]
      44  lockdep_assert_held(&lruvec->lru_lock);  [in __update_lru_size()]
      47  mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);  [in __update_lru_size()]
      52  static __always_inline void update_lru_size(struct lruvec *lruvec,  [in update_lru_size(), argument]
      56  __update_lru_size(lruvec, lru, zid, nr_pages);  [in update_lru_size()]
      58  mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);  [in update_lru_size()]
     164  static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)  [in lru_gen_is_active(), argument]
     166  unsigned long max_seq = lruvec->lrugen.max_seq;  [in lru_gen_is_active()]
     174  static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,  [in lru_gen_update_size(), argument]
    [all …]
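The mm_inline.h hits split LRU size accounting across two helpers. Folding them into one sketch: the lruvec (node) and per-zone counters are always updated under the lru_lock, while the memcg-side size is only updated when memcg is enabled. The zone-level update is part of the real __update_lru_size() even though this page's excerpt elides that line.

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/vmstat.h>

static __always_inline void update_lru_size_sketch(struct lruvec *lruvec,
                                                   enum lru_list lru,
                                                   enum zone_type zid,
                                                   long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        lockdep_assert_held(&lruvec->lru_lock);

        mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        mod_zone_page_state(&pgdat->node_zones[zid],
                            NR_ZONE_LRU_BASE + lru, nr_pages);

        if (!mem_cgroup_disabled())
                mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
}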
memcontrol.h
     113  struct lruvec lruvec;  [member]
     706  static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,  [in mem_cgroup_lruvec()]
     710  struct lruvec *lruvec;  [in mem_cgroup_lruvec(), local]
     713  lruvec = &pgdat->__lruvec;  [in mem_cgroup_lruvec()]
     721  lruvec = &mz->lruvec;  [in mem_cgroup_lruvec()]
     728  if (unlikely(lruvec->pgdat != pgdat))  [in mem_cgroup_lruvec()]
     729  lruvec->pgdat = pgdat;  [in mem_cgroup_lruvec()]
     730  return lruvec;  [in mem_cgroup_lruvec()]
     739  static inline struct lruvec *folio_lruvec(struct folio *folio)  [in folio_lruvec()]
     755  struct lruvec *folio_lruvec_lock(struct folio *folio);
    [all …]
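The memcontrol.h hits sketch out the central lookup: with no memcg (or memcg disabled) the node's built-in __lruvec is used; otherwise the cgroup's per-node struct owns the lruvec, and its pgdat pointer is lazily fixed up because a node can come online after the memcg was created. Reassembled as a sketch, assuming CONFIG_MEMCG:

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

static inline struct lruvec *mem_cgroup_lruvec_sketch(struct mem_cgroup *memcg,
                                                      struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled() || !memcg) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /* A node may be onlined after the memcg was created, so keep
         * the cached pgdat pointer up to date. */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}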
mmzone.h
     451  struct lruvec;
     546  struct lruvec *lruvec;  [member]
     618  void lru_gen_init_lruvec(struct lruvec *lruvec);
     634  static inline void lru_gen_init_lruvec(struct lruvec *lruvec)  [in lru_gen_init_lruvec(), argument]
     669  struct lruvec {  [struct]
    1499  struct lruvec __lruvec;
    1557  extern void lruvec_init(struct lruvec *lruvec);
    1559  static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)  [in lruvec_pgdat(), argument]
    1562  return lruvec->pgdat;  [in lruvec_pgdat()]
    1564  return container_of(lruvec, struct pglist_data, __lruvec);  [in lruvec_pgdat()]
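The mmzone.h hits give both halves of the lruvec-to-node mapping: with memcg, the lruvec caches its pgdat; without, the lruvec is the one embedded in pglist_data, so container_of() recovers the node. Both branches appear verbatim in the hits above; as one sketch:

#include <linux/mmzone.h>

static inline struct pglist_data *lruvec_pgdat_sketch(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
        return lruvec->pgdat;
#else
        return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}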
swap.h
     319  void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
     332  void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
     334  __releases(lruvec->lru_lock);
vmstat.h
     523  void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
     537  static inline void mod_lruvec_state(struct lruvec *lruvec,  [in mod_lruvec_state(), argument]
     540  mod_node_page_state(lruvec_pgdat(lruvec), idx, val);  [in mod_lruvec_state()]
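The vmstat.h hits give the !CONFIG_MEMCG fallback: without cgroups, updating a lruvec stat is just updating the node stat. A trivial usage sketch (the stat choice and the _sketch name are arbitrary illustrations):

#include <linux/mmzone.h>
#include <linux/vmstat.h>

static void charge_refaults_sketch(struct lruvec *lruvec, long nr_pages)
{
        /* Resolves to mod_node_page_state() when CONFIG_MEMCG=n. */
        mod_lruvec_state(lruvec, WORKINGSET_REFAULT_FILE, nr_pages);
}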
/linux/Documentation/mm/
multigen_lru.rst
      83  ``lruvec``. The youngest generation number is stored in
     114  The aging produces young generations. Given an ``lruvec``, it
     130  The eviction consumes old generations. Given an ``lruvec``, it
     148  set, an ``lruvec`` is protected from the eviction when its oldest
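The excerpt describes generations as a sliding window of sequence numbers per lruvec: aging advances max_seq, eviction advances min_seq. A small sketch of that arithmetic, assuming CONFIG_LRU_GEN; it mirrors lru_gen_is_active() from the mm_inline.h entry above.

#include <linux/mm_inline.h>

static bool gen_is_active_sketch(struct lruvec *lruvec, int gen)
{
        unsigned long max_seq = lruvec->lrugen.max_seq;

        /* lru_gen_from_seq() maps a growing sequence number onto one
         * of MAX_NR_GENS ring slots; the two youngest are "active". */
        return gen == lru_gen_from_seq(max_seq) ||
               gen == lru_gen_from_seq(max_seq - 1);
}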
process_addrs.rst
     432  lruvec->lru_lock (in folio_lruvec_lock_irq)
     480  ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
     481  ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
/linux/mm/damon/
core.c
    2048  struct lruvec *lruvec;  [in damos_get_node_memcg_used_bp(), local]
    2062  lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));  [in damos_get_node_memcg_used_bp()]
    2063  used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);  [in damos_get_node_memcg_used_bp()]
    2064  used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);  [in damos_get_node_memcg_used_bp()]
    2065  used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);  [in damos_get_node_memcg_used_bp()]
    2066  used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);  [in damos_get_node_memcg_used_bp()]
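The DAMON hits show how a consumer samples a memcg's footprint on one node: resolve the lruvec, then sum the four evictable LRU sizes. All of these lines appear in the hits above; gathered into one sketch function (the _sketch name is mine):

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

static unsigned long node_memcg_used_pages_sketch(struct mem_cgroup *memcg,
                                                  int nid)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        unsigned long used_pages;

        used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
        used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
        used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
        used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
        return used_pages;
}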
/linux/Documentation/trace/
events-kmem.rst
      72  contention on the lruvec->lru_lock.